Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-02-23 03:48:20 +00:00)

Compare commits: v0.1.2-dev ... v0.6.2 (196 commits)
| SHA1 |
|---|
| ffe153efa7 |
| 91f4ed9825 |
| aa9556aa59 |
| 91f0fe5740 |
| b0fecc9f87 |
| 53cccd405f |
| 5b84184921 |
| af1df425a2 |
| 8e170cf327 |
| b55cfee8c8 |
| 420c3d4258 |
| b92943a98c |
| e1318aa326 |
| 2bd4a71913 |
| 5b206f4c9d |
| 3f969a2921 |
| 90be14fd57 |
| 1a5d9fc65c |
| ec03a094e5 |
| 9d60bb1ee7 |
| cd10de2dce |
| 658fb08c02 |
| 3b40488877 |
| d3d0ad0cf3 |
| 473cc37a75 |
| 966cba4a4e |
| da90755530 |
| fa58623815 |
| 26af4da507 |
| b527470153 |
| e70561141d |
| 20b547984e |
| 16a658a5be |
| 42e50e6dc2 |
| 3d942ce355 |
| 94f617b06a |
| 211c4d05e8 |
| a9f3bdf4ab |
| 2303aecab4 |
| 7655841e9f |
| c4bbcf9de6 |
| 0cec1ce23e |
| 089fe828aa |
| 24a09fb3df |
| b2901454d6 |
| 6cf589dc9b |
| 683ceda3a7 |
| 6a18b56587 |
| 2c9e5be816 |
| 5d5a0ef335 |
| 428f16ffef |
| f93e54b63c |
| c30b350e8e |
| 8fdb5aa024 |
| 83a3c30d01 |
| 63646c8c92 |
| 097e7ab42a |
| 3d45c8de50 |
| 8e1958c20b |
| 3e6c1792ef |
| 6b5b4bfb2a |
| b797436884 |
| 2de3c1d0d4 |
| 7e81757e2f |
| 4773f87875 |
| aa5bc34280 |
| b9a25c1141 |
| b42b8b16fd |
| e0aac68759 |
| 9939671ccc |
| eaa8515442 |
| 04b578cee1 |
| f8e53d309c |
| 6076309b3e |
| 05db135d23 |
| 433cdb6006 |
| 4a4dca1926 |
| 6d591dde74 |
| 8e624e057e |
| eb2642ba90 |
| 1a43cabfb9 |
| 580e37943b |
| 749775c7ea |
| 8ff8c30fb4 |
| 9893b7396c |
| 8c90344f28 |
| e4955729d2 |
| 8a7b0314e5 |
| e87d00c9cf |
| 336347b3c5 |
| 15d0899406 |
| ad096f9781 |
| d3c6a3dffc |
| 57b1653383 |
| a86255ba51 |
| 0a7a4ce7d6 |
| 4c3735a897 |
| 22fd38c053 |
| 895f67a8d4 |
| 56e807b663 |
| af64c7dc2d |
| 1e6458973b |
| 7bf8bb5436 |
| 1358911d95 |
| 1271d2f113 |
| bc0227b49b |
| dc643c2d76 |
| 0744e8ebc0 |
| d4c9fdf6ac |
| 829979b6c7 |
| 32cd29bf70 |
| 03cb6cbd4d |
| ba4a89488e |
| b0d4a92e47 |
| 3e5a840c5a |
| d6d34238d2 |
| 8bbced5925 |
| 20da1b9c9a |
| b6a6e577c4 |
| 84888221ae |
| 222477b33e |
| 4a50d94633 |
| b4dba782fb |
| 9c78a797e4 |
| 35c733a4c1 |
| e5810d023e |
| 96930bd6ea |
| e09ce32146 |
| d15c009b3c |
| 95c8b8e9d8 |
| 2d798a5611 |
| 3a22249be9 |
| a4c1898624 |
| 672f02490a |
| fc00275d9c |
| 6219b93430 |
| 3a4571d671 |
| 96052ac69a |
| 6463a4b5d0 |
| 0ca127853d |
| b884ba128e |
| fe25ea3d8c |
| e0f587f599 |
| e9e1ef4772 |
| eb8b841850 |
| 28681affda |
| 378f0b659a |
| 35b943e04f |
| 65f75c17fc |
| 806eab817c |
| 585510d76c |
| c8a381d5bb |
| 3d04e6bded |
| f8e851a6ed |
| e70a615135 |
| 73ad0adf72 |
| 5b74e51db1 |
| 2e2492cc5d |
| 2ef5c2cbac |
| 3c89e1f7b3 |
| 2910724b49 |
| 3af945692e |
| 5fe9dae557 |
| 42c53ec3e2 |
| 291df8bfef |
| d015286f65 |
| fe91b4c878 |
| 7609c50641 |
| df934990d7 |
| 3c4a80f16d |
| a31139d4a5 |
| 6da3606721 |
| bfbc72724d |
| 956b6f7d95 |
| c1a039de3f |
| f8b18e09d6 |
| b20a7a679b |
| 36d866375e |
| 024edc30a3 |
| 6aa5e0b5a8 |
| 1a38550fdd |
| 3e7ebb5a84 |
| 4bca7342d3 |
| f80908fb4e |
| e000e10738 |
| d83862f36c |
| 1020402b34 |
| bc6ce6ed53 |
| d3b1953deb |
| 3c67215e76 |
| 586624c836 |
| 49855e6333 |
| 624249c0f3 |
| 1cf443a63b |
| 8909679f44 |
| e58efbf0ea |
addressmanager/addressmanager.go (1421 lines)
File diff suppressed because it is too large
@@ -2,28 +2,31 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/subnetworkid"
"io/ioutil"
"net"
"reflect"
"testing"
"time"

"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"

"github.com/pkg/errors"

"github.com/kaspanet/kaspad/wire"
"github.com/kaspanet/kaspad/domainmessage"
)

// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
in wire.NetAddress
want string
in domainmessage.NetAddress
want AddressKey
}

// naTests houses all of the tests to be performed against the NetAddressKey
@@ -94,36 +97,57 @@ func addNaTests() {
addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}

func addNaTest(ip string, port uint16, want string) {
func addNaTest(ip string, port uint16, want AddressKey) {
nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork)
na := *domainmessage.NewNetAddressIPPort(nip, port, domainmessage.SFNodeNetwork)
test := naTest{na, want}
naTests = append(naTests, test)
}

func lookupFunc(host string) ([]net.IP, error) {
func lookupFuncForTest(host string) ([]net.IP, error) {
return nil, errors.New("not implemented")
}

func newAddrManagerForTest(t *testing.T, testName string,
localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddressManager, teardown func()) {

cfg := config.DefaultConfig()
cfg.SubnetworkID = localSubnetworkID

dbPath, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}

databaseContext, err := dbaccess.New(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}

addressManager = New(cfg, databaseContext)

return addressManager, func() {
err := databaseContext.Close()
if err != nil {
t.Fatalf("error closing the database: %s", err)
}
}
}

func TestStartStop(t *testing.T) {
n := New("teststartstop", lookupFunc, nil)
n.Start()
err := n.Stop()
amgr, teardown := newAddrManagerForTest(t, "TestStartStop", nil)
defer teardown()
err := amgr.Start()
if err != nil {
t.Fatalf("Address Manager failed to start: %v", err)
}
err = amgr.Stop()
if err != nil {
t.Fatalf("Address Manager failed to stop: %v", err)
}
}

func TestAddAddressByIP(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

fmtErr := errors.Errorf("")
addrErr := &net.AddrError{}
var tests = []struct {
@@ -148,7 +172,8 @@ func TestAddAddressByIP(t *testing.T) {
},
}

amgr := New("testaddressbyip", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
if test.err != nil && err == nil {
@@ -168,52 +193,44 @@ func TestAddAddressByIP(t *testing.T) {
}

func TestAddLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

var tests = []struct {
address wire.NetAddress
address domainmessage.NetAddress
priority AddressPriority
valid bool
}{
{
wire.NetAddress{IP: net.ParseIP("192.168.0.100")},
domainmessage.NetAddress{IP: net.ParseIP("192.168.0.100")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
domainmessage.NetAddress{IP: net.ParseIP("204.124.1.1")},
InterfacePrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
domainmessage.NetAddress{IP: net.ParseIP("204.124.1.1")},
BoundPrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("::1")},
domainmessage.NetAddress{IP: net.ParseIP("::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("fe80::1")},
domainmessage.NetAddress{IP: net.ParseIP("fe80::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("2620:100::1")},
domainmessage.NetAddress{IP: net.ParseIP("2620:100::1")},
InterfacePrio,
true,
},
}
amgr := New("testaddlocaladdress", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAddLocalAddress", nil)
defer teardown()
for x, test := range tests {
result := amgr.AddLocalAddress(&test.address, test.priority)
if result == nil && !test.valid {
@@ -230,30 +247,22 @@ func TestAddLocalAddress(t *testing.T) {
}

func TestAttempt(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

n := New("testattempt", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil)
defer teardown()

// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
err := amgr.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()

if !ka.LastAttempt().IsZero() {
t.Errorf("Address should not have attempts, but does")
}

na := ka.NetAddress()
n.Attempt(na)
amgr.Attempt(na)

if ka.LastAttempt().IsZero() {
t.Errorf("Address should have an attempt, but does not")
@@ -261,28 +270,20 @@
}

func TestConnected(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

n := New("testconnected", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil)
defer teardown()

// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
err := amgr.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()
na := ka.NetAddress()
// make it an hour ago
na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0)
na.Timestamp = mstime.Now().Add(time.Hour * -1)

n.Connected(na)
amgr.Connected(na)

if !ka.NetAddress().Timestamp.After(na.Timestamp) {
t.Errorf("Address should have a new timestamp, but does not")
@@ -290,66 +291,50 @@ func TestConnected(t *testing.T) {
}

func TestNeedMoreAddresses(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

n := New("testneedmoreaddresses", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil)
defer teardown()
addrsToAdd := 1500
b := n.NeedMoreAddresses()
b := amgr.NeedMoreAddresses()
if !b {
t.Errorf("Expected that we need more addresses")
}
addrs := make([]*wire.NetAddress, addrsToAdd)
addrs := make([]*domainmessage.NetAddress, addrsToAdd)

var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)
addrs[i], err = n.DeserializeNetAddress(s)
s := AddressKey(fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60))
addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
}

srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

n.AddAddresses(addrs, srcAddr, nil)
numAddrs := n.TotalNumAddresses()
amgr.AddAddresses(addrs, srcAddr, nil)
numAddrs := amgr.TotalNumAddresses()
if numAddrs > addrsToAdd {
t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
}

b = n.NeedMoreAddresses()
b = amgr.NeedMoreAddresses()
if b {
t.Errorf("Expected that we don't need more addresses")
}
}

func TestGood(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

n := New("testgood", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestGood", nil)
defer teardown()
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
addrs := make([]*domainmessage.NetAddress, addrsToAdd)
subnetworkCount := 32
subnetworkIDs := make([]*subnetworkid.SubnetworkID, subnetworkCount)

var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)
addrs[i], err = n.DeserializeNetAddress(s)
s := AddressKey(fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60))
addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
@@ -359,26 +344,26 @@ func TestGood(t *testing.T) {
subnetworkIDs[i] = &subnetworkid.SubnetworkID{0xff - byte(i)}
}

srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

n.AddAddresses(addrs, srcAddr, nil)
amgr.AddAddresses(addrs, srcAddr, nil)
for i, addr := range addrs {
n.Good(addr, subnetworkIDs[i%subnetworkCount])
amgr.Good(addr, subnetworkIDs[i%subnetworkCount])
}

numAddrs := n.TotalNumAddresses()
numAddrs := amgr.TotalNumAddresses()
if numAddrs >= addrsToAdd {
t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
}

numCache := len(n.AddressCache(true, nil))
numCache := len(amgr.AddressCache(true, nil))
if numCache == 0 || numCache >= numAddrs/4 {
t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
numCache, numAddrs/4)
}

for i := 0; i < subnetworkCount; i++ {
numCache = len(n.AddressCache(false, subnetworkIDs[i]))
numCache = len(amgr.AddressCache(false, subnetworkIDs[i]))
if numCache == 0 || numCache >= numAddrs/subnetworkCount {
t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
numCache, numAddrs/4/subnetworkCount)
@@ -387,26 +372,18 @@ func TestGood(t *testing.T) {
}

func TestGoodChangeSubnetworkID(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil)
defer teardown()
addr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

oldSubnetwork := subnetworkid.SubnetworkIDNative
n.AddAddress(addr, srcAddr, oldSubnetwork)
n.Good(addr, oldSubnetwork)
amgr.AddAddress(addr, srcAddr, oldSubnetwork)
amgr.Good(addr, oldSubnetwork)

// make sure address was saved to addrIndex under oldSubnetwork
ka := n.find(addr)
// make sure address was saved to addressIndex under oldSubnetwork
ka := amgr.knownAddress(addr)
if ka == nil {
t.Fatalf("Address was not found after first time .Good called")
}
@@ -415,10 +392,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}

// make sure address was added to correct bucket under oldSubnetwork
bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
bucket := amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound := false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
for _, ka := range bucket {
if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true
}
}
@@ -428,10 +405,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {

// now call .Good again with a different subnetwork
newSubnetwork := subnetworkid.SubnetworkIDRegistry
n.Good(addr, newSubnetwork)
amgr.Good(addr, newSubnetwork)

// make sure address was updated in addrIndex under newSubnetwork
ka = n.find(addr)
// make sure address was updated in addressIndex under newSubnetwork
ka = amgr.knownAddress(addr)
if ka == nil {
t.Fatalf("Address was not found after second time .Good called")
}
@@ -440,10 +417,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}

// make sure address was removed from bucket under oldSubnetwork
bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
bucket = amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
for _, ka := range bucket {
if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true
}
}
@@ -452,10 +429,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}

// make sure address was added to correct bucket under newSubnetwork
bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
bucket = amgr.subnetworkTriedAddresBucketArrays[*newSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
for _, ka := range bucket {
if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true
}
}
@@ -465,44 +442,36 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}

func TestGetAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID)
defer teardown()

// Get an address from an empty set (should error)
if rv := n.GetAddress(); rv != nil {
if rv := amgr.GetAddress(); rv != nil {
t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
}

// Add a new address and get it
err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
n.Attempt(ka.NetAddress())
amgr.Attempt(ka.NetAddress())

// Checks that we don't get it if we find that it has other subnetwork ID than expected.
actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
n.Good(ka.NetAddress(), actualSubnetworkID)
ka = n.GetAddress()
amgr.Good(ka.NetAddress(), actualSubnetworkID)
ka = amgr.GetAddress()
if ka != nil {
t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
}

// Checks that the total number of addresses incremented although the new address is not full node or a partial node of the same subnetwork as the local node.
numAddrs := n.TotalNumAddresses()
numAddrs := amgr.TotalNumAddresses()
if numAddrs != 1 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
@@ -510,11 +479,11 @@ func TestGetAddress(t *testing.T) {
// Now we repeat the same process, but now the address has the expected subnetwork ID.

// Add a new address and get it
err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka = n.GetAddress()
ka = amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
@@ -524,11 +493,11 @@ func TestGetAddress(t *testing.T) {
if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
}
n.Attempt(ka.NetAddress())
amgr.Attempt(ka.NetAddress())

// Mark this as a good address and get it
n.Good(ka.NetAddress(), localSubnetworkID)
ka = n.GetAddress()
amgr.Good(ka.NetAddress(), localSubnetworkID)
ka = amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
@@ -539,23 +508,14 @@ func TestGetAddress(t *testing.T) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
}

numAddrs = n.TotalNumAddresses()
numAddrs = amgr.TotalNumAddresses()
if numAddrs != 2 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
}

func TestGetBestLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

localAddrs := []wire.NetAddress{
localAddrs := []domainmessage.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")},
{IP: net.ParseIP("fe80::1")},
@@ -563,48 +523,49 @@ func TestGetBestLocalAddress(t *testing.T) {
}

var tests = []struct {
remoteAddr wire.NetAddress
want0 wire.NetAddress
want1 wire.NetAddress
want2 wire.NetAddress
want3 wire.NetAddress
remoteAddr domainmessage.NetAddress
want0 domainmessage.NetAddress
want1 domainmessage.NetAddress
want2 domainmessage.NetAddress
want3 domainmessage.NetAddress
}{
{
// Remote connection from public IPv4
wire.NetAddress{IP: net.ParseIP("204.124.8.1")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
domainmessage.NetAddress{IP: net.ParseIP("204.124.8.1")},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
{
// Remote connection from private IPv4
wire.NetAddress{IP: net.ParseIP("172.16.0.254")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.ParseIP("172.16.0.254")},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.IPv4zero},
},
{
// Remote connection from public IPv6
wire.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
wire.NetAddress{IP: net.IPv6zero},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
domainmessage.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
domainmessage.NetAddress{IP: net.IPv6zero},
domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
},
/* XXX
{
// Remote connection from Tor
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
domainmessage.NetAddress{IP: net.IPv4zero},
domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
*/
}

amgr := New("testgetbestlocaladdress", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
defer teardown()

// Test against default when there's no address
for x, test := range tests {
@@ -631,7 +592,7 @@ func TestGetBestLocalAddress(t *testing.T) {
}

// Add a public IP to the list of local addresses.
localAddr := wire.NetAddress{IP: net.ParseIP("204.124.8.100")}
localAddr := domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")}
amgr.AddLocalAddress(&localAddr, InterfacePrio)

// Test against want2
@@ -645,7 +606,7 @@ func TestGetBestLocalAddress(t *testing.T) {
}
/*
// Add a Tor generated IP address
localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
localAddr = domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
amgr.AddLocalAddress(&localAddr, ManualPrio)
// Test against want3
for x, test := range tests {
@@ -1,5 +1,5 @@
/*
Package addrmgr implements concurrency safe Kaspa address manager.
Package addressmanager implements concurrency safe Kaspa address manager.

Address Manager Overview

@@ -31,4 +31,4 @@ peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only
providing usable addresses.
*/
package addrmgr
package addressmanager
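Beyond the rename from addrmgr to addressmanager, the diffs above show that the manager is now constructed from a config and a database context rather than a data directory and a DNS lookup function. The sketch below pieces a minimal lifecycle together from the calls visible in the updated tests (New, Start, AddAddressByIP, GetAddress, Stop); the database path is a hypothetical example, and exact signatures should be checked against addressmanager.go, whose diff is suppressed above.

```go
package main

import (
	"log"

	"github.com/kaspanet/kaspad/addressmanager"
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dbaccess"
)

func main() {
	cfg := config.DefaultConfig()

	// The manager now takes a database context, as seen in newAddrManagerForTest.
	databaseContext, err := dbaccess.New("/tmp/addressmanager-example") // hypothetical path
	if err != nil {
		log.Fatalf("error creating db: %s", err)
	}
	defer databaseContext.Close()

	amgr := addressmanager.New(cfg, databaseContext)
	if err := amgr.Start(); err != nil {
		log.Fatalf("address manager failed to start: %s", err)
	}
	defer amgr.Stop()

	// Addresses are registered by "ip:port" and can then be handed out.
	if err := amgr.AddAddressByIP("203.0.113.5:16111", nil); err != nil {
		log.Fatalf("adding address failed: %s", err)
	}
	if ka := amgr.GetAddress(); ka != nil {
		log.Printf("selected peer: %s", ka.NetAddress().IP)
	}
}
```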
addressmanager/internal_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager

import (
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/util/mstime"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
return ka.isBad()
}

func TstKnownAddressChance(ka *KnownAddress) float64 {
return ka.chance()
}

func TstNewKnownAddress(na *domainmessage.NetAddress, attempts int,
lastattempt, lastsuccess mstime.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{netAddress: na, attempts: attempts, lastAttempt: lastattempt,
lastSuccess: lastsuccess, tried: tried, referenceCount: refs}
}
@@ -2,33 +2,36 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
"github.com/kaspanet/kaspad/util/mstime"
"time"

"github.com/kaspanet/kaspad/util/subnetworkid"

"github.com/kaspanet/kaspad/wire"
"github.com/kaspanet/kaspad/domainmessage"
)

// KnownAddress tracks information about a known network address that is used
// to determine how viable an address is.
type KnownAddress struct {
na *wire.NetAddress
srcAddr *wire.NetAddress
attempts int
lastattempt time.Time
lastsuccess time.Time
tried bool
refs int // reference count of new buckets
subnetworkID *subnetworkid.SubnetworkID
netAddress *domainmessage.NetAddress
sourceAddress *domainmessage.NetAddress
attempts int
lastAttempt mstime.Time
lastSuccess mstime.Time
tried bool
referenceCount int // reference count of new buckets
subnetworkID *subnetworkid.SubnetworkID
isBanned bool
bannedTime mstime.Time
}

// NetAddress returns the underlying wire.NetAddress associated with the
// NetAddress returns the underlying domainmessage.NetAddress associated with the
// known address.
func (ka *KnownAddress) NetAddress() *wire.NetAddress {
return ka.na
func (ka *KnownAddress) NetAddress() *domainmessage.NetAddress {
return ka.netAddress
}

// SubnetworkID returns the subnetwork ID of the known address.
@@ -37,16 +40,16 @@ func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID {
}

// LastAttempt returns the last time the known address was attempted.
func (ka *KnownAddress) LastAttempt() time.Time {
return ka.lastattempt
func (ka *KnownAddress) LastAttempt() mstime.Time {
return ka.lastAttempt
}

// chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 {
now := time.Now()
lastAttempt := now.Sub(ka.lastattempt)
now := mstime.Now()
lastAttempt := now.Sub(ka.lastAttempt)

if lastAttempt < 0 {
lastAttempt = 0
@@ -76,27 +79,27 @@ func (ka *KnownAddress) chance() float64 {
// All addresses that meet these criteria are assumed to be worthless and not
// worth keeping hold of.
func (ka *KnownAddress) isBad() bool {
if ka.lastattempt.After(time.Now().Add(-1 * time.Minute)) {
if ka.lastAttempt.After(mstime.Now().Add(-1 * time.Minute)) {
return false
}

// From the future?
if ka.na.Timestamp.After(time.Now().Add(10 * time.Minute)) {
if ka.netAddress.Timestamp.After(mstime.Now().Add(10 * time.Minute)) {
return true
}

// Over a month old?
if ka.na.Timestamp.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
if ka.netAddress.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
return true
}

// Never succeeded?
if ka.lastsuccess.IsZero() && ka.attempts >= numRetries {
if ka.lastSuccess.IsZero() && ka.attempts >= numRetries {
return true
}

// Hasn't succeeded in too long?
if !ka.lastsuccess.After(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
if !ka.lastSuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) &&
ka.attempts >= maxFailures {
return true
}
addressmanager/knownaddress_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager_test

import (
"github.com/kaspanet/kaspad/util/mstime"
"math"
"testing"
"time"

"github.com/kaspanet/kaspad/addressmanager"
"github.com/kaspanet/kaspad/domainmessage"
)

func TestChance(t *testing.T) {
now := mstime.Now()
var tests = []struct {
addr *addressmanager.KnownAddress
expected float64
}{
{
//Test normal case
addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1.0,
}, {
//Test case in which lastAttempt < 0
addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(30*time.Minute), mstime.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastAttempt < ten minutes
addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(-5*time.Minute), mstime.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1 / 1.5 / 1.5,
},
}

err := .0001
for i, test := range tests {
chance := addressmanager.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}

func TestIsBad(t *testing.T) {
now := mstime.Now()
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := mstime.Time{}

futureNa := &domainmessage.NetAddress{Timestamp: future}
minutesOldNa := &domainmessage.NetAddress{Timestamp: minutesOld}
monthOldNa := &domainmessage.NetAddress{Timestamp: monthOld}
currentNa := &domainmessage.NetAddress{Timestamp: secondsOld}

//Test addresses that have been tried in the last minute.
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}

//Test address that claims to be from the future.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}

//Test address that has not been seen in over a month.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}

//It has failed at least three times and never succeeded.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}

//It has failed ten times in the last week
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}

//Test an address that should work.
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}
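TestChance above pins down how the selection probability reacts to failure count and attempt recency: an address attempted within the last ten minutes (or whose negative attempt interval is clamped to zero) is scaled down by 0.01, and every failed attempt divides the probability by 1.5. The snippet below is not the real chance() implementation, only a small model reconstructed from those expected values.

```go
package main

import (
	"fmt"
	"time"
)

// approximateChance models the expectations in TestChance: attempts divide the
// probability by 1.5 each, and a very recent (or clamped) attempt scales it by 0.01.
func approximateChance(failedAttempts int, sinceLastAttempt time.Duration) float64 {
	if sinceLastAttempt < 0 {
		sinceLastAttempt = 0 // an attempt timestamp in the future is treated as "just now"
	}
	c := 1.0
	for i := 0; i < failedAttempts; i++ {
		c /= 1.5
	}
	if sinceLastAttempt < 10*time.Minute {
		c *= 0.01
	}
	return c
}

func main() {
	fmt.Println(approximateChance(0, 30*time.Minute))  // 1.0    (test case 1)
	fmt.Println(approximateChance(0, -30*time.Minute)) // 0.01   (test case 3)
	fmt.Println(approximateChance(2, 30*time.Minute))  // ~0.444 (test case 5: 1/1.5/1.5)
}
```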
@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
"github.com/kaspanet/kaspad/logger"
@@ -2,14 +2,12 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
"net"

"github.com/kaspanet/kaspad/config"

"github.com/kaspanet/kaspad/wire"
"github.com/kaspanet/kaspad/domainmessage"
)

var (
@@ -88,19 +86,19 @@ func ipNet(ip string, ones, bits int) net.IPNet {
}

// IsIPv4 returns whether or not the given address is an IPv4 address.
func IsIPv4(na *wire.NetAddress) bool {
func IsIPv4(na *domainmessage.NetAddress) bool {
return na.IP.To4() != nil
}

// IsLocal returns whether or not the given address is a local address.
func IsLocal(na *wire.NetAddress) bool {
func IsLocal(na *domainmessage.NetAddress) bool {
return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
}

// IsRFC1918 returns whether or not the passed address is part of the IPv4
// private network address space as defined by RFC1918 (10.0.0.0/8,
// 172.16.0.0/12, or 192.168.0.0/16).
func IsRFC1918(na *wire.NetAddress) bool {
func IsRFC1918(na *domainmessage.NetAddress) bool {
for _, rfc := range rfc1918Nets {
if rfc.Contains(na.IP) {
return true
@@ -111,56 +109,56 @@ func IsRFC1918(na *wire.NetAddress) bool {

// IsRFC2544 returns whether or not the passed address is part of the IPv4
// address space as defined by RFC2544 (198.18.0.0/15)
func IsRFC2544(na *wire.NetAddress) bool {
func IsRFC2544(na *domainmessage.NetAddress) bool {
return rfc2544Net.Contains(na.IP)
}

// IsRFC3849 returns whether or not the passed address is part of the IPv6
// documentation range as defined by RFC3849 (2001:DB8::/32).
func IsRFC3849(na *wire.NetAddress) bool {
func IsRFC3849(na *domainmessage.NetAddress) bool {
return rfc3849Net.Contains(na.IP)
}

// IsRFC3927 returns whether or not the passed address is part of the IPv4
// autoconfiguration range as defined by RFC3927 (169.254.0.0/16).
func IsRFC3927(na *wire.NetAddress) bool {
func IsRFC3927(na *domainmessage.NetAddress) bool {
return rfc3927Net.Contains(na.IP)
}

// IsRFC3964 returns whether or not the passed address is part of the IPv6 to
// IPv4 encapsulation range as defined by RFC3964 (2002::/16).
func IsRFC3964(na *wire.NetAddress) bool {
func IsRFC3964(na *domainmessage.NetAddress) bool {
return rfc3964Net.Contains(na.IP)
}

// IsRFC4193 returns whether or not the passed address is part of the IPv6
// unique local range as defined by RFC4193 (FC00::/7).
func IsRFC4193(na *wire.NetAddress) bool {
func IsRFC4193(na *domainmessage.NetAddress) bool {
return rfc4193Net.Contains(na.IP)
}

// IsRFC4380 returns whether or not the passed address is part of the IPv6
// teredo tunneling over UDP range as defined by RFC4380 (2001::/32).
func IsRFC4380(na *wire.NetAddress) bool {
func IsRFC4380(na *domainmessage.NetAddress) bool {
return rfc4380Net.Contains(na.IP)
}

// IsRFC4843 returns whether or not the passed address is part of the IPv6
// ORCHID range as defined by RFC4843 (2001:10::/28).
func IsRFC4843(na *wire.NetAddress) bool {
func IsRFC4843(na *domainmessage.NetAddress) bool {
return rfc4843Net.Contains(na.IP)
}

// IsRFC4862 returns whether or not the passed address is part of the IPv6
// stateless address autoconfiguration range as defined by RFC4862 (FE80::/64).
func IsRFC4862(na *wire.NetAddress) bool {
func IsRFC4862(na *domainmessage.NetAddress) bool {
return rfc4862Net.Contains(na.IP)
}

// IsRFC5737 returns whether or not the passed address is part of the IPv4
// documentation address space as defined by RFC5737 (192.0.2.0/24,
// 198.51.100.0/24, 203.0.113.0/24)
func IsRFC5737(na *wire.NetAddress) bool {
func IsRFC5737(na *domainmessage.NetAddress) bool {
for _, rfc := range rfc5737Net {
if rfc.Contains(na.IP) {
return true
@@ -172,19 +170,19 @@ func IsRFC5737(na *wire.NetAddress) bool {

// IsRFC6052 returns whether or not the passed address is part of the IPv6
// well-known prefix range as defined by RFC6052 (64:FF9B::/96).
func IsRFC6052(na *wire.NetAddress) bool {
func IsRFC6052(na *domainmessage.NetAddress) bool {
return rfc6052Net.Contains(na.IP)
}

// IsRFC6145 returns whether or not the passed address is part of the IPv6 to
// IPv4 translated address range as defined by RFC6145 (::FFFF:0:0:0/96).
func IsRFC6145(na *wire.NetAddress) bool {
func IsRFC6145(na *domainmessage.NetAddress) bool {
return rfc6145Net.Contains(na.IP)
}

// IsRFC6598 returns whether or not the passed address is part of the IPv4
// shared address space specified by RFC6598 (100.64.0.0/10)
func IsRFC6598(na *wire.NetAddress) bool {
func IsRFC6598(na *domainmessage.NetAddress) bool {
return rfc6598Net.Contains(na.IP)
}

@@ -192,7 +190,7 @@ func IsRFC6598(na *wire.NetAddress) bool {
// considered invalid under the following circumstances:
// IPv4: It is either a zero or all bits set address.
// IPv6: It is either a zero or RFC3849 documentation address.
func IsValid(na *wire.NetAddress) bool {
func IsValid(na *domainmessage.NetAddress) bool {
// IsUnspecified returns if address is 0, so only all bits set, and
// RFC3849 need to be explicitly checked.
return na.IP != nil && !(na.IP.IsUnspecified() ||
@@ -202,8 +200,8 @@ func IsValid(na *wire.NetAddress) bool {
// IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
if config.ActiveConfig().NetParams().AcceptUnroutable {
func (am *AddressManager) IsRoutable(na *domainmessage.NetAddress) bool {
if am.cfg.NetParams().AcceptUnroutable {
return !IsLocal(na)
}

@@ -217,11 +215,11 @@ func IsRoutable(na *wire.NetAddress) bool {
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, and the string "unroutable" for an unroutable
// address.
func GroupKey(na *wire.NetAddress) string {
func (am *AddressManager) GroupKey(na *domainmessage.NetAddress) string {
if IsLocal(na) {
return "local"
}
if !IsRoutable(na) {
if !am.IsRoutable(na) {
return "unroutable"
}
if IsIPv4(na) {
@@ -2,32 +2,22 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr_test
package addressmanager

import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net"
"testing"

"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
"github.com/kaspanet/kaspad/domainmessage"
)

// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)

amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()
type ipTest struct {
in wire.NetAddress
in domainmessage.NetAddress
rfc1918 bool
rfc2544 bool
rfc3849 bool
@@ -50,7 +40,7 @@ func TestIPTypes(t *testing.T) {
rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
local, valid, routable bool) ipTest {
nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
na := *domainmessage.NewNetAddressIPPort(nip, 16111, domainmessage.SFNodeNetwork)
test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
return test
@@ -99,55 +89,55 @@ func TestIPTypes(t *testing.T) {

t.Logf("Running %d tests", len(tests))
for _, test := range tests {
if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 {
if rv := IsRFC1918(&test.in); rv != test.rfc1918 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
}

if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 {
if rv := IsRFC3849(&test.in); rv != test.rfc3849 {
t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
}

if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 {
if rv := IsRFC3927(&test.in); rv != test.rfc3927 {
t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
}

if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 {
if rv := IsRFC3964(&test.in); rv != test.rfc3964 {
t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
}

if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 {
if rv := IsRFC4193(&test.in); rv != test.rfc4193 {
t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
}

if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 {
if rv := IsRFC4380(&test.in); rv != test.rfc4380 {
t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
}

if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 {
if rv := IsRFC4843(&test.in); rv != test.rfc4843 {
t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
}

if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 {
if rv := IsRFC4862(&test.in); rv != test.rfc4862 {
t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
}

if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 {
if rv := IsRFC6052(&test.in); rv != test.rfc6052 {
t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
}

if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 {
if rv := IsRFC6145(&test.in); rv != test.rfc6145 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
}

if rv := addrmgr.IsLocal(&test.in); rv != test.local {
if rv := IsLocal(&test.in); rv != test.local {
t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
}

if rv := addrmgr.IsValid(&test.in); rv != test.valid {
if rv := IsValid(&test.in); rv != test.valid {
t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
}

if rv := addrmgr.IsRoutable(&test.in); rv != test.routable {
if rv := amgr.IsRoutable(&test.in); rv != test.routable {
t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
}
}
@@ -156,14 +146,8 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()

tests := []struct {
name string
@@ -212,8 +196,8 @@ func TestGroupKey(t *testing.T) {

for i, test := range tests {
nip := net.ParseIP(test.ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
if key := addrmgr.GroupKey(&na); key != test.expected {
na := *domainmessage.NewNetAddressIPPort(nip, 8333, domainmessage.SFNodeNetwork)
if key := amgr.GroupKey(&na); key != test.expected {
t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
"- got '%s', want '%s'", i, test.name,
key, test.expected)
File diff suppressed because it is too large
@@ -1,25 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
"time"

"github.com/kaspanet/kaspad/wire"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
return ka.isBad()
}

func TstKnownAddressChance(ka *KnownAddress) float64 {
return ka.chance()
}

func TstNewKnownAddress(na *wire.NetAddress, attempts int,
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
lastsuccess: lastsuccess, tried: tried, refs: refs}
}
@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr_test

import (
"math"
"testing"
"time"

"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)

func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}

err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}

func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}

futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}

//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}

//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}

//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}

//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}

//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}

//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}
249
app/app.go
Normal file
@@ -0,0 +1,249 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/addressmanager"
|
||||
|
||||
"github.com/kaspanet/kaspad/netadapter/id"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/kaspanet/kaspad/connmanager"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/dnsseed"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/mempool"
|
||||
"github.com/kaspanet/kaspad/mining"
|
||||
"github.com/kaspanet/kaspad/netadapter"
|
||||
"github.com/kaspanet/kaspad/protocol"
|
||||
"github.com/kaspanet/kaspad/rpc"
|
||||
"github.com/kaspanet/kaspad/signal"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
// App is a wrapper for all the kaspad services
|
||||
type App struct {
|
||||
cfg *config.Config
|
||||
rpcServer *rpc.Server
|
||||
addressManager *addressmanager.AddressManager
|
||||
protocolManager *protocol.Manager
|
||||
connectionManager *connmanager.ConnectionManager
|
||||
netAdapter *netadapter.NetAdapter
|
||||
|
||||
started, shutdown int32
|
||||
}
|
||||
|
||||
// Start launches all the kaspad services.
|
||||
func (a *App) Start() {
|
||||
// Already started?
|
||||
if atomic.AddInt32(&a.started, 1) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
log.Trace("Starting kaspad")
|
||||
|
||||
err := a.protocolManager.Start()
|
||||
if err != nil {
|
||||
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
|
||||
}
|
||||
|
||||
a.maybeSeedFromDNS()
|
||||
|
||||
a.connectionManager.Start()
|
||||
|
||||
if !a.cfg.DisableRPC {
|
||||
a.rpcServer.Start()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down all the kaspad services.
|
||||
func (a *App) Stop() error {
|
||||
// Make sure this only happens once.
|
||||
if atomic.AddInt32(&a.shutdown, 1) != 1 {
|
||||
log.Infof("Kaspad is already in the process of shutting down")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warnf("Kaspad shutting down")
|
||||
|
||||
a.connectionManager.Stop()
|
||||
|
||||
err := a.protocolManager.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping the p2p protocol: %+v", err)
|
||||
}
|
||||
|
||||
// Shutdown the RPC server if it's not disabled.
|
||||
if !a.cfg.DisableRPC {
|
||||
err := a.rpcServer.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping rpcServer: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New returns a new App instance configured with the given config and
// database context for the kaspa network type specified by cfg.NetParams().
// Use Start to begin accepting connections from peers.
|
||||
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
|
||||
indexManager, acceptanceIndex := setupIndexes(cfg)
|
||||
|
||||
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
|
||||
|
||||
// Create a new block DAG instance with the appropriate configuration.
|
||||
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txMempool := setupMempool(cfg, dag, sigCache)
|
||||
|
||||
netAdapter, err := netadapter.NewNetAdapter(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addressManager := addressmanager.New(cfg, databaseContext)
|
||||
|
||||
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rpcServer, err := setupRPC(
|
||||
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &App{
|
||||
cfg: cfg,
|
||||
rpcServer: rpcServer,
|
||||
protocolManager: protocolManager,
|
||||
connectionManager: connectionManager,
|
||||
netAdapter: netAdapter,
|
||||
addressManager: addressManager,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *App) maybeSeedFromDNS() {
|
||||
if !a.cfg.DisableDNSSeed {
|
||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, domainmessage.SFNodeNetwork, false, nil,
|
||||
a.cfg.Lookup, func(addresses []*domainmessage.NetAddress) {
|
||||
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
// IPs of other nodes and not its own IP, we cannot know the real IP of
// the source, so we take the first returned address as the source.
|
||||
a.addressManager.AddAddresses(addresses, addresses[0], nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
|
||||
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
Interrupt: interrupt,
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: cfg.NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
SigCache: sigCache,
|
||||
IndexManager: indexManager,
|
||||
SubnetworkID: cfg.SubnetworkID,
|
||||
})
|
||||
return dag, err
|
||||
}
|
||||
|
||||
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
|
||||
// Create indexes if needed.
|
||||
var indexes []indexers.Indexer
|
||||
var acceptanceIndex *indexers.AcceptanceIndex
|
||||
if cfg.AcceptanceIndex {
|
||||
log.Info("acceptance index is enabled")
|
||||
acceptanceIndex = indexers.NewAcceptanceIndex()
|
||||
indexes = append(indexes, acceptanceIndex)
|
||||
}
|
||||
|
||||
// Create an index manager if any of the optional indexes are enabled.
|
||||
if len(indexes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
indexManager := indexers.NewManager(indexes)
|
||||
return indexManager, acceptanceIndex
|
||||
}
|
||||
|
||||
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
|
||||
mempoolConfig := mempool.Config{
|
||||
Policy: mempool.Policy{
|
||||
AcceptNonStd: cfg.RelayNonStd,
|
||||
MaxOrphanTxs: cfg.MaxOrphanTxs,
|
||||
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
|
||||
MinRelayTxFee: cfg.MinRelayTxFee,
|
||||
MaxTxVersion: 1,
|
||||
},
|
||||
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
|
||||
},
|
||||
IsDeploymentActive: dag.IsDeploymentActive,
|
||||
SigCache: sigCache,
|
||||
DAG: dag,
|
||||
}
|
||||
|
||||
return mempool.New(&mempoolConfig)
|
||||
}
|
||||
|
||||
func setupRPC(cfg *config.Config,
|
||||
dag *blockdag.BlockDAG,
|
||||
txMempool *mempool.TxPool,
|
||||
sigCache *txscript.SigCache,
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
protocolManager *protocol.Manager) (*rpc.Server, error) {
|
||||
|
||||
if !cfg.DisableRPC {
|
||||
policy := mining.Policy{
|
||||
BlockMaxMass: cfg.BlockMaxMass,
|
||||
}
|
||||
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
|
||||
|
||||
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
|
||||
connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Signal process shutdown when the RPC server requests it.
|
||||
spawn("setupRPC-handleShutdownRequest", func() {
|
||||
<-rpcServer.RequestedProcessShutdown()
|
||||
signal.ShutdownRequestChannel <- struct{}{}
|
||||
})
|
||||
|
||||
return rpcServer, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// P2PNodeID returns the network ID associated with this App
|
||||
func (a *App) P2PNodeID() *id.ID {
|
||||
return a.netAdapter.ID()
|
||||
}
|
||||
|
||||
// AddressManager returns the AddressManager associated with this App
|
||||
func (a *App) AddressManager() *addressmanager.AddressManager {
|
||||
return a.addressManager
|
||||
}
|
||||
|
||||
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
|
||||
func (a *App) WaitForShutdown() {
|
||||
// TODO(libp2p)
|
||||
// a.p2pServer.WaitForShutdown()
|
||||
}
|
||||
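For orientation, a minimal sketch (not part of the diff) of how the new app package introduced above is meant to be driven, based only on the New, Start, Stop and WaitForShutdown signatures shown in this file. The wrapping function, package name and logging are illustrative assumptions; cfg and databaseContext are assumed to come from the existing config and dbaccess packages.

package kaspadmain // hypothetical

import (
	"log"

	"github.com/kaspanet/kaspad/app"
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dbaccess"
)

// runApp is a hypothetical driver for the App lifecycle shown above.
func runApp(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) error {
	kaspadApp, err := app.New(cfg, databaseContext, interrupt)
	if err != nil {
		return err
	}

	kaspadApp.Start()
	defer func() {
		if err := kaspadApp.Stop(); err != nil {
			log.Printf("error stopping kaspad: %+v", err)
		}
	}()

	// Block until shutdown is requested, then wait for handlers to wind down.
	<-interrupt
	kaspadApp.WaitForShutdown()
	return nil
}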
@@ -1,13 +1,14 @@
// Copyright (c) 2017 The btcsuite developers
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package netsync
package app

import (
	"github.com/kaspanet/kaspad/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.SYNC)
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -6,7 +6,8 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -16,7 +17,17 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
|
||||
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
|
||||
newNode.status = statusInvalidAncestor
|
||||
dag.index.AddNode(newNode)
|
||||
return dag.index.flushToDB()
|
||||
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dbTx.Commit()
|
||||
}
|
||||
|
||||
// maybeAcceptBlock potentially accepts a block into the block DAG. It
|
||||
@@ -62,13 +73,26 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
|
||||
// expensive connection logic. It also has some other nice properties
|
||||
// such as making blocks that never become part of the DAG or
|
||||
// blocks that fail to connect available for further analysis.
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
err := dbStoreBlock(dbTx, block)
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockExists {
|
||||
err := storeBlock(dbTx, block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dag.index.flushToDBWithTx(dbTx)
|
||||
})
|
||||
}
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -82,8 +106,6 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
|
||||
}
|
||||
}
|
||||
|
||||
block.SetBlueScore(newNode.blueScore)
|
||||
|
||||
// Connect the passed block to the DAG. This also handles validation of the
|
||||
// transaction scripts.
|
||||
chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
|
||||
@@ -110,17 +132,17 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
|
||||
func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) {
|
||||
header := block.MsgBlock().Header
|
||||
parentHashes := header.ParentHashes
|
||||
|
||||
nodes := newBlockSet()
|
||||
for _, parentHash := range parentHashes {
|
||||
node := blockDAG.index.LookupNode(parentHash)
|
||||
if node == nil {
|
||||
node, ok := dag.index.LookupNode(parentHash)
|
||||
if !ok {
|
||||
str := fmt.Sprintf("parent block %s is unknown", parentHash)
|
||||
return nil, ruleError(ErrParentBlockUnknown, str)
|
||||
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
|
||||
} else if dag.index.NodeStatus(node).KnownInvalid() {
|
||||
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
|
||||
return nil, ruleError(ErrInvalidAncestorBlock, str)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -63,7 +63,10 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
if isOrphan {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
|
||||
}
|
||||
blockNode1 := dag.index.LookupNode(block1.Hash())
|
||||
blockNode1, ok := dag.index.LookupNode(block1.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", block1.Hash())
|
||||
}
|
||||
dag.index.SetStatusFlags(blockNode1, statusValidateFailed)
|
||||
|
||||
block2 := blocks[2]
|
||||
|
||||
@@ -10,15 +10,15 @@ import (
|
||||
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
|
||||
func TestBlockHeap(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
|
||||
DAGParams: &dagconfig.MainnetParams,
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
block0Header := dagconfig.MainnetParams.GenesisBlock.Header
|
||||
block0Header := dagconfig.SimnetParams.GenesisBlock.Header
|
||||
block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
|
||||
|
||||
block100000Header := Block100000.Header
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// idByHashIndexBucketName is the name of the db bucket used to house
|
||||
// the block hash -> block id index.
|
||||
idByHashIndexBucketName = []byte("idbyhashidx")
|
||||
|
||||
// hashByIDIndexBucketName is the name of the db bucket used to house
|
||||
// the block id -> block hash index.
|
||||
hashByIDIndexBucketName = []byte("hashbyididx")
|
||||
|
||||
currentBlockIDKey = []byte("currentblockid")
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// This is a mapping between block hashes and unique IDs. The ID
|
||||
// is simply a sequentially incremented uint64 that is used instead of block hash
|
||||
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
|
||||
// hashes and thus saves a ton of space when a block is referenced in an index.
|
||||
// It consists of three buckets: the first bucket maps the hash of each
|
||||
// block to the unique ID and the second maps that ID back to the block hash.
|
||||
// The third bucket contains the last received block ID, and is used
|
||||
// when starting the node to check that the enabled indexes are up to date
|
||||
// with the latest received block, and if not, initiate recovery process.
|
||||
//
|
||||
// The serialized format for keys and values in the block hash to ID bucket is:
|
||||
// <hash> = <ID>
|
||||
//
|
||||
// Field Type Size
|
||||
// hash daghash.Hash 32 bytes
|
||||
// ID uint64 8 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// The serialized format for keys and values in the ID to block hash bucket is:
|
||||
// <ID> = <hash>
|
||||
//
|
||||
// Field Type Size
|
||||
// ID uint64 8 bytes
|
||||
// hash daghash.Hash 32 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const blockIDSize = 8 // 8 bytes for block ID
|
||||
|
||||
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
|
||||
// block id for the provided hash from the index.
|
||||
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
|
||||
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
|
||||
serializedID := hashIndex.Get(hash[:])
|
||||
if serializedID == nil {
|
||||
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
|
||||
}
|
||||
|
||||
return DeserializeBlockID(serializedID), nil
|
||||
}
|
||||
|
||||
// DBFetchBlockHashBySerializedID uses an existing database transaction to
|
||||
// retrieve the hash for the provided serialized block id from the index.
|
||||
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
|
||||
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
|
||||
hashBytes := idIndex.Get(serializedID)
|
||||
if hashBytes == nil {
|
||||
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
|
||||
}
|
||||
|
||||
var hash daghash.Hash
|
||||
copy(hash[:], hashBytes)
|
||||
return &hash, nil
|
||||
}
|
||||
|
||||
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
|
||||
// the index entries for the hash to id and id to hash mappings for the provided
|
||||
// values.
|
||||
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
|
||||
// Add the block hash to ID mapping to the index.
|
||||
meta := dbTx.Metadata()
|
||||
hashIndex := meta.Bucket(idByHashIndexBucketName)
|
||||
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the block ID to hash mapping to the index.
|
||||
idIndex := meta.Bucket(hashByIDIndexBucketName)
|
||||
return idIndex.Put(serializedID[:], hash[:])
|
||||
}
|
||||
|
||||
// DBFetchCurrentBlockID returns the last known block ID.
|
||||
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
|
||||
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
|
||||
if serializedID == nil {
|
||||
return 0
|
||||
}
|
||||
return DeserializeBlockID(serializedID)
|
||||
}
|
||||
|
||||
// DeserializeBlockID returns a deserialized block id
|
||||
func DeserializeBlockID(serializedID []byte) uint64 {
|
||||
return byteOrder.Uint64(serializedID)
|
||||
}
|
||||
|
||||
// SerializeBlockID returns a serialized block id
|
||||
func SerializeBlockID(blockID uint64) []byte {
|
||||
serializedBlockID := make([]byte, blockIDSize)
|
||||
byteOrder.PutUint64(serializedBlockID, blockID)
|
||||
return serializedBlockID
|
||||
}
|
||||
|
||||
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
|
||||
// hash for the provided block id from the index.
|
||||
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
|
||||
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
|
||||
}
|
||||
|
||||
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
|
||||
currentBlockID := DBFetchCurrentBlockID(dbTx)
|
||||
newBlockID := currentBlockID + 1
|
||||
serializedNewBlockID := SerializeBlockID(newBlockID)
|
||||
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return newBlockID, nil
|
||||
}
|
||||
@@ -5,10 +5,10 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
@@ -18,7 +18,6 @@ type blockIndex struct {
|
||||
// The following fields are set when the instance is created and can't
|
||||
// be changed afterwards, so there is no need to protect them with a
|
||||
// separate mutex.
|
||||
db database.DB
|
||||
dagParams *dagconfig.Params
|
||||
|
||||
sync.RWMutex
|
||||
@@ -29,9 +28,8 @@ type blockIndex struct {
|
||||
// newBlockIndex returns a new empty instance of a block index. The index will
|
||||
// be dynamically populated as block nodes are loaded from the database and
|
||||
// manually added.
|
||||
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
|
||||
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
|
||||
return &blockIndex{
|
||||
db: db,
|
||||
dagParams: dagParams,
|
||||
index: make(map[daghash.Hash]*blockNode),
|
||||
dirty: make(map[*blockNode]struct{}),
|
||||
@@ -52,11 +50,11 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
|
||||
// return nil if there is no entry for the hash.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
|
||||
func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
|
||||
bi.RLock()
|
||||
defer bi.RUnlock()
|
||||
node := bi.index[*hash]
|
||||
return node
|
||||
node, ok := bi.index[*hash]
|
||||
return node, ok
|
||||
}
|
||||
|
||||
// AddNode adds the provided node to the block index and marks it as dirty.
|
||||
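The LookupNode change above replaces the old nil-check idiom with a comma-ok idiom that the rest of this diff adopts at every call site. A short, hedged package-internal sketch of the new calling convention (the helper itself is hypothetical):

// parentStatus is a hypothetical blockdag-internal helper showing the new
// two-value LookupNode: callers check ok instead of comparing against nil.
func (dag *BlockDAG) parentStatus(parentHash *daghash.Hash) (blockStatus, error) {
	node, ok := dag.index.LookupNode(parentHash)
	if !ok {
		return 0, errors.Errorf("block %s is unknown", parentHash)
	}
	return dag.index.NodeStatus(node), nil
}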
@@ -111,17 +109,8 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
|
||||
bi.dirty[node] = struct{}{}
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty block nodes to the database. If all writes
|
||||
// succeed, this clears the dirty set.
|
||||
func (bi *blockIndex) flushToDB() error {
|
||||
return bi.db.Update(func(dbTx database.Tx) error {
|
||||
return bi.flushToDBWithTx(dbTx)
|
||||
})
|
||||
}
|
||||
|
||||
// flushToDBWithTx writes all dirty block nodes to the database. If all
|
||||
// writes succeed, this clears the dirty set.
|
||||
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
|
||||
// flushToDB writes all dirty block nodes to the database.
|
||||
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
bi.Lock()
|
||||
defer bi.Unlock()
|
||||
if len(bi.dirty) == 0 {
|
||||
@@ -129,7 +118,12 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
|
||||
}
|
||||
|
||||
for node := range bi.dirty {
|
||||
err := dbStoreBlockNode(dbTx, node)
|
||||
serializedBlockNode, err := serializeBlockNode(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := blockIndexKey(node.hash, node.blueScore)
|
||||
err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAncestorErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -18,7 +17,7 @@ func TestAncestorErrors(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0))
|
||||
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, mstime.Now())
|
||||
node.blueScore = 2
|
||||
ancestor := node.SelectedAncestor(3)
|
||||
if ancestor != nil {
|
||||
|
||||
@@ -29,8 +29,15 @@ func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (Bl
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
|
||||
highNode := dag.index.LookupNode(highHash)
|
||||
lowNode := dag.index.LookupNode(lowHash)
|
||||
highNode, ok := dag.index.LookupNode(highHash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("block %s is unknown", highHash)
|
||||
}
|
||||
|
||||
lowNode, ok := dag.index.LookupNode(lowHash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("block %s is unknown", lowHash)
|
||||
}
|
||||
|
||||
return dag.blockLocator(highNode, lowNode)
|
||||
}
|
||||
@@ -88,8 +95,8 @@ func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash,
|
||||
lowNode := dag.genesis
|
||||
nextBlockLocatorIndex := int64(len(locator) - 1)
|
||||
for i, hash := range locator {
|
||||
node := dag.index.LookupNode(hash)
|
||||
if node != nil {
|
||||
node, ok := dag.index.LookupNode(hash)
|
||||
if ok {
|
||||
lowNode = node
|
||||
nextBlockLocatorIndex = int64(i) - 1
|
||||
break
|
||||
|
||||
@@ -6,13 +6,14 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// blockStatus is a bit field representing the validation state of the block.
|
||||
@@ -105,12 +106,12 @@ type blockNode struct {
|
||||
// anticone of its selected parent (parent with highest blue score).
|
||||
// selectedParentAnticone is used to update reachability data we store for future reachability queries.
|
||||
// This function is NOT safe for concurrent access.
|
||||
func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
|
||||
func (dag *BlockDAG) newBlockNode(blockHeader *domainmessage.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
|
||||
node = &blockNode{
|
||||
parents: parents,
|
||||
children: make(blockSet),
|
||||
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
|
||||
timestamp: dag.AdjustedTime().Unix(),
|
||||
timestamp: dag.Now().UnixMilliseconds(),
|
||||
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
|
||||
}
|
||||
|
||||
@@ -120,7 +121,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
|
||||
node.version = blockHeader.Version
|
||||
node.bits = blockHeader.Bits
|
||||
node.nonce = blockHeader.Nonce
|
||||
node.timestamp = blockHeader.Timestamp.Unix()
|
||||
node.timestamp = blockHeader.Timestamp.UnixMilliseconds()
|
||||
node.hashMerkleRoot = blockHeader.HashMerkleRoot
|
||||
node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
|
||||
node.utxoCommitment = blockHeader.UTXOCommitment
|
||||
@@ -159,15 +160,15 @@ func (node *blockNode) less(other *blockNode) bool {
|
||||
// Header constructs a block header from the node and returns it.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (node *blockNode) Header() *wire.BlockHeader {
|
||||
func (node *blockNode) Header() *domainmessage.BlockHeader {
|
||||
// No lock is needed because all accessed fields are immutable.
|
||||
return &wire.BlockHeader{
|
||||
return &domainmessage.BlockHeader{
|
||||
Version: node.version,
|
||||
ParentHashes: node.ParentHashes(),
|
||||
HashMerkleRoot: node.hashMerkleRoot,
|
||||
AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
|
||||
UTXOCommitment: node.utxoCommitment,
|
||||
Timestamp: time.Unix(node.timestamp, 0),
|
||||
Timestamp: node.time(),
|
||||
Bits: node.bits,
|
||||
Nonce: node.nonce,
|
||||
}
|
||||
@@ -204,13 +205,13 @@ func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
|
||||
// prior to, and including, the block node.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
|
||||
func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
|
||||
window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
|
||||
medianTimestamp, err := window.medianTimestamp()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("blueBlockWindow: %s", err))
|
||||
}
|
||||
return time.Unix(medianTimestamp, 0)
|
||||
return mstime.UnixMilliseconds(medianTimestamp)
|
||||
}
|
||||
|
||||
func (node *blockNode) ParentHashes() []*daghash.Hash {
|
||||
@@ -223,10 +224,14 @@ func (node *blockNode) isGenesis() bool {
|
||||
}
|
||||
|
||||
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
|
||||
return node.blueScore / uint64(dag.dagParams.FinalityInterval)
|
||||
return node.blueScore / uint64(dag.FinalityInterval())
|
||||
}
|
||||
|
||||
// String returns a string that contains the block hash.
|
||||
func (node blockNode) String() string {
|
||||
return node.hash.String()
|
||||
}
|
||||
|
||||
func (node *blockNode) time() mstime.Time {
|
||||
return mstime.UnixMilliseconds(node.timestamp)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
// This test ensures that the blue anticone sizes are serialized to the size of KType.
|
||||
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
|
||||
func TestBlueAnticoneSizesSize(t *testing.T) {
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/bigintpool"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"math/big"
|
||||
@@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
|
||||
return
|
||||
}
|
||||
|
||||
func (window blockWindow) averageTarget() *big.Int {
|
||||
averageTarget := big.NewInt(0)
|
||||
func (window blockWindow) averageTarget(averageTarget *big.Int) {
|
||||
averageTarget.SetInt64(0)
|
||||
|
||||
target := bigintpool.Acquire(0)
|
||||
defer bigintpool.Release(target)
|
||||
for _, node := range window {
|
||||
target := util.CompactToBig(node.bits)
|
||||
util.CompactToBigWithDestination(node.bits, target)
|
||||
averageTarget.Add(averageTarget, target)
|
||||
}
|
||||
return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
|
||||
|
||||
windowLen := bigintpool.Acquire(int64(len(window)))
|
||||
defer bigintpool.Release(windowLen)
|
||||
averageTarget.Div(averageTarget, windowLen)
|
||||
}
|
||||
|
||||
func (window blockWindow) medianTimestamp() (int64, error) {
|
||||
|
||||
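The averageTarget rewrite above stops allocating fresh big.Ints per call: temporaries are borrowed from bigintpool and the result is written into a caller-supplied destination. A minimal sketch of the same acquire/defer-release pattern, using only the bigintpool and util calls that appear in this hunk (the surrounding function is hypothetical):

// workRatio is a hypothetical helper: temporaries come from bigintpool and
// are released on exit; the result goes into a caller-supplied destination.
func workRatio(result *big.Int, numeratorBits, denominatorBits uint32) {
	numerator := bigintpool.Acquire(0)
	defer bigintpool.Release(numerator)
	denominator := bigintpool.Acquire(0)
	defer bigintpool.Release(denominator)

	util.CompactToBigWithDestination(numeratorBits, numerator)
	util.CompactToBigWithDestination(denominatorBits, denominator)
	if denominator.Sign() == 0 {
		result.SetInt64(0)
		return
	}
	result.Div(numerator, denominator)
}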
@@ -12,7 +12,7 @@ import (
|
||||
func TestBlueBlockWindow(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -51,14 +51,14 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C", "D"},
|
||||
parents: []string{"D", "C"},
|
||||
id: "E",
|
||||
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C", "D"},
|
||||
parents: []string{"D", "C"},
|
||||
id: "F",
|
||||
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
@@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
{
|
||||
parents: []string{"H", "F"},
|
||||
id: "I",
|
||||
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"I"},
|
||||
id: "J",
|
||||
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"J"},
|
||||
id: "K",
|
||||
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K"},
|
||||
id: "L",
|
||||
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"L"},
|
||||
id: "M",
|
||||
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"M"},
|
||||
id: "N",
|
||||
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"N"},
|
||||
id: "O",
|
||||
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
|
||||
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -133,7 +133,10 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
t.Fatalf("block %v was unexpectedly orphan", blockData.id)
|
||||
}
|
||||
|
||||
node := dag.index.LookupNode(utilBlock.Hash())
|
||||
node, ok := dag.index.LookupNode(utilBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
|
||||
}
|
||||
|
||||
blockByIDMap[blockData.id] = node
|
||||
idByBlockMap[node] = blockData.id
|
||||
|
||||
@@ -4,16 +4,16 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/coinbasepayload"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/util/txsort"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// compactFeeData is a specialized data type to store a compact list of fees
|
||||
@@ -73,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
|
||||
}
|
||||
|
||||
// The following functions relate to storing and retrieving fee data from the database
|
||||
var feeBucket = []byte("fees")
|
||||
|
||||
// getBluesFeeData returns the compactFeeData for all of the node's blues,
|
||||
// used to calculate the fees this blockNode needs to pay
|
||||
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
|
||||
func (dag *BlockDAG) getBluesFeeData(node *blockNode) (map[daghash.Hash]compactFeeData, error) {
|
||||
bluesFeeData := make(map[daghash.Hash]compactFeeData)
|
||||
|
||||
err := dag.db.View(func(dbTx database.Tx) error {
|
||||
for _, blueBlock := range node.blues {
|
||||
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
|
||||
}
|
||||
|
||||
bluesFeeData[*blueBlock.hash] = feeData
|
||||
for _, blueBlock := range node.blues {
|
||||
feeData, err := dbaccess.FetchFeeData(dag.databaseContext, blueBlock.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
bluesFeeData[*blueBlock.hash] = feeData
|
||||
}
|
||||
|
||||
return bluesFeeData, nil
|
||||
}
|
||||
|
||||
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
|
||||
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
|
||||
}
|
||||
|
||||
return feeBucket.Put(blockHash.CloneBytes(), feeData)
|
||||
}
|
||||
|
||||
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
|
||||
feeBucket := dbTx.Metadata().Bucket(feeBucket)
|
||||
if feeBucket == nil {
|
||||
return nil, errors.New("Fee bucket does not exist")
|
||||
}
|
||||
|
||||
feeData := feeBucket.Get(blockHash.CloneBytes())
|
||||
if feeData == nil {
|
||||
return nil, errors.Errorf("No fee data found for block %s", blockHash)
|
||||
}
|
||||
|
||||
return feeData, nil
|
||||
}
|
||||
|
||||
// The following functions deal with building and validating the coinbase transaction
|
||||
|
||||
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
|
||||
@@ -129,7 +98,10 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
|
||||
return nil
|
||||
}
|
||||
blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
|
||||
scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
|
||||
_, scriptPubKey, extraData, err := coinbasepayload.DeserializeCoinbasePayload(blockCoinbaseTx)
|
||||
if errors.Is(err, coinbasepayload.ErrIncorrectScriptPubKeyLen) {
|
||||
return ruleError(ErrBadCoinbaseTransaction, err.Error())
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -147,132 +119,81 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
|
||||
|
||||
// expectedCoinbaseTransaction returns the coinbase transaction for the current block
|
||||
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
|
||||
bluesFeeData, err := node.getBluesFeeData(dag)
|
||||
bluesFeeData, err := dag.getBluesFeeData(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txIns := []*wire.TxIn{}
|
||||
txOuts := []*wire.TxOut{}
|
||||
txIns := []*domainmessage.TxIn{}
|
||||
txOuts := []*domainmessage.TxOut{}
|
||||
|
||||
for _, blue := range node.blues {
|
||||
txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
|
||||
txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txIns = append(txIns, txIn)
|
||||
if txOut != nil {
|
||||
txOuts = append(txOuts, txOut)
|
||||
}
|
||||
}
|
||||
payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
|
||||
payload, err := coinbasepayload.SerializeCoinbasePayload(node.blueScore, scriptPubKey, extraData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
|
||||
coinbaseTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
|
||||
sortedCoinbaseTx := txsort.Sort(coinbaseTx)
|
||||
return util.NewTx(sortedCoinbaseTx), nil
|
||||
}
|
||||
|
||||
// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
|
||||
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = w.Write(scriptPubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = w.Write(extraData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.Bytes(), nil
|
||||
}
|
||||
|
||||
// DeserializeCoinbasePayload deserializes the coinbase payload into its components (scriptPubKey and extra data).
|
||||
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
|
||||
r := bytes.NewReader(tx.Payload)
|
||||
scriptPubKeyLen, err := wire.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
scriptPubKey = make([]byte, scriptPubKeyLen)
|
||||
_, err = r.Read(scriptPubKey)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
extraData = make([]byte, r.Len())
|
||||
if r.Len() != 0 {
|
||||
_, err = r.Read(extraData)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return scriptPubKey, extraData, nil
|
||||
}
|
||||
|
||||
// feeInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
|
||||
// If blueBlock gets no fee - returns only txIn and nil for txOut
|
||||
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
|
||||
*wire.TxIn, *wire.TxOut, error) {
|
||||
// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock
|
||||
// If blueBlock gets no fee - returns nil for txOut
|
||||
func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*domainmessage.TxOut, error) {
|
||||
|
||||
blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
|
||||
if !ok {
|
||||
return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
|
||||
return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
|
||||
}
|
||||
blockFeeData, ok := feeData[*blueBlock.hash]
|
||||
if !ok {
|
||||
return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
|
||||
return nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
|
||||
}
|
||||
|
||||
if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
|
||||
return nil, nil, errors.Errorf(
|
||||
return nil, errors.Errorf(
|
||||
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
|
||||
len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
|
||||
}
|
||||
|
||||
txIn := &wire.TxIn{
|
||||
SignatureScript: []byte{},
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID(*blueBlock.hash),
|
||||
Index: math.MaxUint32,
|
||||
},
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
|
||||
totalFees := uint64(0)
|
||||
feeIterator := blockFeeData.iterator()
|
||||
|
||||
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
|
||||
fee, err := feeIterator.next()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
|
||||
return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
|
||||
}
|
||||
if txAcceptanceData.IsAccepted {
|
||||
totalFees += fee
|
||||
}
|
||||
}
|
||||
|
||||
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
|
||||
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.Params) + totalFees
|
||||
|
||||
if totalReward == 0 {
|
||||
return txIn, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// the ScriptPubKey for the coinbase is parsed from the coinbase payload
|
||||
scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
|
||||
_, scriptPubKey, _, err := coinbasepayload.DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txOut := &wire.TxOut{
|
||||
txOut := &domainmessage.TxOut{
|
||||
Value: totalReward,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
|
||||
return txIn, txOut, nil
|
||||
return txOut, nil
|
||||
}
|
||||
|
||||
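The hunk above moves coinbase payload (de)serialization into the coinbasepayload package and adds the blue score as a new leading field. A hedged round-trip sketch based only on the two call sites visible in this diff; the meaning of the first return value of DeserializeCoinbasePayload (assumed here to be the blue score) and the helper's name are assumptions:

// roundTripCoinbasePayload is illustrative only.
func roundTripCoinbasePayload(blueScore uint64, scriptPubKey, extraData []byte,
	txIns []*domainmessage.TxIn, txOuts []*domainmessage.TxOut) error {

	payload, err := coinbasepayload.SerializeCoinbasePayload(blueScore, scriptPubKey, extraData)
	if err != nil {
		return err
	}
	coinbaseTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts,
		subnetworkid.SubnetworkIDCoinbase, 0, payload)

	// The first return value is assumed to be the blue score encoded above.
	_, decodedScriptPubKey, decodedExtraData, err := coinbasepayload.DeserializeCoinbasePayload(coinbaseTx)
	if err != nil {
		return err
	}
	_ = decodedScriptPubKey
	_ = decodedExtraData
	return nil
}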
@@ -7,20 +7,20 @@ package blockdag
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// loadUTXOSet returns a utxo view loaded from a file.
|
||||
@@ -73,19 +73,12 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Serialized utxo entry.
|
||||
serialized := make([]byte, numBytes)
|
||||
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
|
||||
// Deserialize the UTXO entry and add it to the UTXO set.
|
||||
entry, err := deserializeUTXOEntry(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Deserialize it and add it to the view.
|
||||
entry, err := deserializeUTXOEntry(serialized)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
|
||||
utxoSet.utxoCollection[domainmessage.Outpoint{TxID: txID, Index: index}] = entry
|
||||
}
|
||||
|
||||
return utxoSet, nil
|
||||
@@ -94,7 +87,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
|
||||
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
|
||||
// available when running tests.
|
||||
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
|
||||
dag.dagParams.BlockCoinbaseMaturity = maturity
|
||||
dag.Params.BlockCoinbaseMaturity = maturity
|
||||
}
|
||||
|
||||
// newTestDAG returns a DAG that is usable for synthetic tests. It is
|
||||
@@ -102,12 +95,10 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
|
||||
// it is not usable with all functions and the tests must take care when making
|
||||
// use of it.
|
||||
func newTestDAG(params *dagconfig.Params) *BlockDAG {
|
||||
index := newBlockIndex(nil, params)
|
||||
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
|
||||
index := newBlockIndex(params)
|
||||
dag := &BlockDAG{
|
||||
dagParams: params,
|
||||
timeSource: NewMedianTime(),
|
||||
targetTimePerBlock: targetTimePerBlock,
|
||||
Params: params,
|
||||
timeSource: NewTimeSource(),
|
||||
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
|
||||
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
|
||||
powMaxBits: util.BigToCompact(params.PowMax),
|
||||
@@ -127,9 +118,9 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
|
||||
|
||||
// newTestNode creates a block node connected to the passed parent with the
|
||||
// provided fields populated and fake values for the other fields.
|
||||
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {
|
||||
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp mstime.Time) *blockNode {
|
||||
// Make up a header and create a block node from it.
|
||||
header := &wire.BlockHeader{
|
||||
header := &domainmessage.BlockHeader{
|
||||
Version: blockVersion,
|
||||
ParentHashes: parents.hashes(),
|
||||
Bits: bits,
|
||||
@@ -152,62 +143,55 @@ func addNodeAsChildToParents(node *blockNode) {
|
||||
// same type (either both nil or both of type RuleError) and their error codes
|
||||
// match when not nil.
|
||||
func checkRuleError(gotErr, wantErr error) error {
|
||||
// Ensure the error code is of the expected type and the error
|
||||
// code matches the value specified in the test instance.
|
||||
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
|
||||
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
|
||||
gotErr, wantErr)
|
||||
}
|
||||
if gotErr == nil {
|
||||
if wantErr == nil && gotErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure the want error type is a script error.
|
||||
werr, ok := wantErr.(RuleError)
|
||||
if !ok {
|
||||
return errors.Errorf("unexpected test error type %T", wantErr)
|
||||
var gotRuleErr RuleError
|
||||
if ok := errors.As(gotErr, &gotRuleErr); !ok {
|
||||
return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
|
||||
}
|
||||
|
||||
var wantRuleErr RuleError
|
||||
if ok := errors.As(wantErr, &wantRuleErr); !ok {
|
||||
return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
|
||||
}
|
||||
|
||||
// Ensure the error codes match. It's safe to use a raw type assert
|
||||
// here since the code above already proved they are the same type and
|
||||
// the want error is a script error.
|
||||
gotErrorCode := gotErr.(RuleError).ErrorCode
|
||||
if gotErrorCode != werr.ErrorCode {
|
||||
if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
|
||||
return errors.Errorf("mismatched error code - got %v (%v), want %v",
|
||||
gotErrorCode, gotErr, werr.ErrorCode)
|
||||
gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
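checkRuleError above drops the reflect-based type comparison in favor of errors.As, so wrapped errors still match by error code. A short, hedged sketch of the same pattern at a call site (the handler function and the chosen error code are placeholders for illustration):

// handleProcessBlockError is hypothetical; it mirrors the errors.As pattern
// adopted in checkRuleError above.
func handleProcessBlockError(err error) {
	var ruleErr RuleError
	if errors.As(err, &ruleErr) && ruleErr.ErrorCode == ErrParentBlockUnknown {
		// e.g. treat the block as an orphan and request its missing parents
		return
	}
	// anything else is either nil or a non-rule failure to be propagated
}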
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
|
||||
func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*domainmessage.MsgBlock) *domainmessage.MsgBlock {
|
||||
parentHashes := make([]*daghash.Hash, len(parents))
|
||||
for i, parent := range parents {
|
||||
parentHashes[i] = parent.BlockHash()
|
||||
}
|
||||
daghash.Sort(parentHashes)
|
||||
block, err := PrepareBlockForTest(dag, parentHashes, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in ProcessBlock: %s", err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("block is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("block was unexpectedly orphan")
|
||||
}
|
||||
return block
|
||||
return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
|
||||
}
|
||||
|
||||
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
|
||||
node := dag.index.LookupNode(block.BlockHash())
|
||||
if node == nil {
|
||||
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *domainmessage.MsgBlock) *blockNode {
|
||||
node, ok := dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
type fakeTimeSource struct {
|
||||
time mstime.Time
|
||||
}
|
||||
|
||||
func (fts *fakeTimeSource) Now() mstime.Time {
|
||||
return fts.time
|
||||
}
|
||||
|
||||
func newFakeTimeSource(fakeTime mstime.Time) TimeSource {
|
||||
return &fakeTimeSource{time: fakeTime}
|
||||
}
|
||||
|
||||
@@ -1,584 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/ecc"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
|
||||
// of binary octets to represent an arbitrarily large integer. The scheme
|
||||
// employs a most significant byte (MSB) base-128 encoding where the high bit in
|
||||
// each byte indicates whether or not the byte is the final one. In addition,
|
||||
// to ensure there are no redundant encodings, an offset is subtracted every
|
||||
// time a group of 7 bits is shifted out. Therefore each integer can be
|
||||
// represented in exactly one way, and each representation stands for exactly
|
||||
// one integer.
|
||||
//
|
||||
// Another nice property of this encoding is that it provides a compact
|
||||
// representation of values that are typically used to indicate sizes. For
|
||||
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
|
||||
// with two bytes, and 16512 - 2113663 with three bytes.
|
||||
//
|
||||
// While the encoding allows arbitrarily large integers, it is artificially
|
||||
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
|
||||
//
|
||||
// Example encodings:
|
||||
// 0 -> [0x00]
|
||||
// 127 -> [0x7f] * Max 1-byte value
|
||||
// 128 -> [0x80 0x00]
|
||||
// 129 -> [0x80 0x01]
|
||||
// 255 -> [0x80 0x7f]
|
||||
// 256 -> [0x81 0x00]
|
||||
// 16511 -> [0xff 0x7f] * Max 2-byte value
|
||||
// 16512 -> [0x80 0x80 0x00]
|
||||
// 32895 -> [0x80 0xff 0x7f]
|
||||
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
|
||||
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
|
||||
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
|
||||
//
|
||||
// References:
|
||||
// https://en.wikipedia.org/wiki/Variable-length_quantity
|
||||
// http://www.codecodex.com/wiki/Variable-Length_Integers
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// serializeSizeVLQ returns the number of bytes it would take to serialize the
|
||||
// passed number as a variable-length quantity according to the format described
|
||||
// above.
|
||||
func serializeSizeVLQ(n uint64) int {
|
||||
size := 1
|
||||
for ; n > 0x7f; n = (n >> 7) - 1 {
|
||||
size++
|
||||
}
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
// putVLQ serializes the provided number to a variable-length quantity according
|
||||
// to the format described above and returns the number of bytes of the encoded
|
||||
// value. The result is placed directly into the passed byte slice which must
|
||||
// be at least large enough to handle the number of bytes returned by the
|
||||
// serializeSizeVLQ function or it will panic.
|
||||
func putVLQ(target []byte, n uint64) int {
|
||||
offset := 0
|
||||
for ; ; offset++ {
|
||||
// The high bit is set when another byte follows.
|
||||
highBitMask := byte(0x80)
|
||||
if offset == 0 {
|
||||
highBitMask = 0x00
|
||||
}
|
||||
|
||||
target[offset] = byte(n&0x7f) | highBitMask
|
||||
if n <= 0x7f {
|
||||
break
|
||||
}
|
||||
n = (n >> 7) - 1
|
||||
}
|
||||
|
||||
// Reverse the bytes so it is MSB-encoded.
|
||||
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
|
||||
target[i], target[j] = target[j], target[i]
|
||||
}
|
||||
|
||||
return offset + 1
|
||||
}
|
||||
|
||||
// deserializeVLQ deserializes the provided variable-length quantity according
|
||||
// to the format described above. It also returns the number of bytes
|
||||
// deserialized.
|
||||
func deserializeVLQ(serialized []byte) (uint64, int) {
|
||||
var n uint64
|
||||
var size int
|
||||
for _, val := range serialized {
|
||||
size++
|
||||
n = (n << 7) | uint64(val&0x7f)
|
||||
if val&0x80 != 0x80 {
|
||||
break
|
||||
}
|
||||
n++
|
||||
}
|
||||
|
||||
return n, size
|
||||
}
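
// Illustrative sketch, not part of the original diff: a round trip through the
// VLQ helpers above, assuming it lives in the same blockdag package. The
// function name exampleVLQRoundTrip is hypothetical.
func exampleVLQRoundTrip() (encoded []byte, decoded uint64) {
	const val = uint64(16512) // encodes as [0x80 0x80 0x00] per the table above

	encoded = make([]byte, serializeSizeVLQ(val)) // 3 bytes
	putVLQ(encoded, val)

	decoded, _ = deserializeVLQ(encoded) // 16512 again, 3 bytes read
	return encoded, decoded
}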
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// In order to reduce the size of stored scripts, a domain specific compression
|
||||
// algorithm is used which recognizes standard scripts and stores them using
|
||||
// fewer bytes than the original script.
|
||||
//
|
||||
// The general serialized format is:
|
||||
//
|
||||
// <script size or type><script data>
|
||||
//
|
||||
// Field Type Size
|
||||
// script size or type VLQ variable
|
||||
// script data []byte variable
|
||||
//
|
||||
// The specific serialized format for each recognized standard script is:
|
||||
//
|
||||
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
|
||||
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
|
||||
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
|
||||
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
|
||||
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
|
||||
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
|
||||
//
|
||||
// Any scripts which are not recognized as one of the aforementioned standard
|
||||
// scripts are encoded using the general serialized format and encode the script
|
||||
// size as the sum of the actual size of the script and the number of special
|
||||
// cases.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// The following constants specify the special constants used to identify a
|
||||
// special script type in the domain-specific compressed script encoding.
|
||||
//
|
||||
// NOTE: This section specifically does not use iota since these values are
|
||||
// serialized and must be stable for long-term storage.
|
||||
const (
|
||||
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
|
||||
cstPayToPubKeyHash = 0
|
||||
|
||||
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
|
||||
cstPayToScriptHash = 1
|
||||
|
||||
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
|
||||
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
|
||||
// to reconstruct the full uncompressed pubkey.
|
||||
cstPayToPubKeyComp2 = 2
|
||||
|
||||
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
|
||||
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
|
||||
// to reconstruct the full uncompressed pubkey.
|
||||
cstPayToPubKeyComp3 = 3
|
||||
|
||||
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
|
||||
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
|
||||
// to reconstruct the full uncompressed pubkey.
|
||||
cstPayToPubKeyUncomp4 = 4
|
||||
|
||||
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
|
||||
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
|
||||
// to reconstruct the full uncompressed pubkey.
|
||||
cstPayToPubKeyUncomp5 = 5
|
||||
|
||||
// numSpecialScripts is the number of special scripts recognized by the
|
||||
// domain-specific script compression algorithm.
|
||||
numSpecialScripts = 6
|
||||
)
|
||||
|
||||
// isPubKeyHash returns whether or not the passed public key script is a
|
||||
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
|
||||
// if it is.
|
||||
func isPubKeyHash(script []byte) (bool, []byte) {
|
||||
if len(script) == 25 && script[0] == txscript.OpDup &&
|
||||
script[1] == txscript.OpHash160 &&
|
||||
script[2] == txscript.OpData20 &&
|
||||
script[23] == txscript.OpEqualVerify &&
|
||||
script[24] == txscript.OpCheckSig {
|
||||
|
||||
return true, script[3:23]
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// isScriptHash returns whether or not the passed public key script is a
|
||||
// standard pay-to-script-hash script along with the script hash it is paying to
|
||||
// if it is.
|
||||
func isScriptHash(script []byte) (bool, []byte) {
|
||||
if len(script) == 23 && script[0] == txscript.OpHash160 &&
|
||||
script[1] == txscript.OpData20 &&
|
||||
script[22] == txscript.OpEqual {
|
||||
|
||||
return true, script[2:22]
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// isPubKey returns whether or not the passed public key script is a standard
|
||||
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
|
||||
// key along with the serialized pubkey it is paying to if it is.
|
||||
//
|
||||
// NOTE: This function ensures the public key is actually valid since the
|
||||
// compression algorithm requires valid pubkeys. It does not support hybrid
|
||||
// pubkeys. This means that even if the script has the correct form for a
|
||||
// pay-to-pubkey script, this function will only return true when it is paying
|
||||
// to a valid compressed or uncompressed pubkey.
|
||||
func isPubKey(script []byte) (bool, []byte) {
|
||||
// Pay-to-compressed-pubkey script.
|
||||
if len(script) == 35 && script[0] == txscript.OpData33 &&
|
||||
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
|
||||
script[1] == 0x03) {
|
||||
|
||||
// Ensure the public key is valid.
|
||||
serializedPubKey := script[1:34]
|
||||
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
|
||||
if err == nil {
|
||||
return true, serializedPubKey
|
||||
}
|
||||
}
|
||||
|
||||
// Pay-to-uncompressed-pubkey script.
|
||||
if len(script) == 67 && script[0] == txscript.OpData65 &&
|
||||
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
|
||||
|
||||
// Ensure the public key is valid.
|
||||
serializedPubKey := script[1:66]
|
||||
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
|
||||
if err == nil {
|
||||
return true, serializedPubKey
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// compressedScriptSize returns the number of bytes the passed script would take
|
||||
// when encoded with the domain specific compression algorithm described above.
|
||||
func compressedScriptSize(scriptPubKey []byte) int {
|
||||
// Pay-to-pubkey-hash script.
|
||||
if valid, _ := isPubKeyHash(scriptPubKey); valid {
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-script-hash script.
|
||||
if valid, _ := isScriptHash(scriptPubKey); valid {
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-pubkey (compressed or uncompressed) script.
|
||||
if valid, _ := isPubKey(scriptPubKey); valid {
|
||||
return 33
|
||||
}
|
||||
|
||||
// When none of the above special cases apply, encode the script as is
|
||||
// preceded by the sum of its size and the number of special cases
|
||||
// encoded as a variable length quantity.
|
||||
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
|
||||
len(scriptPubKey)
|
||||
}
|
||||
|
||||
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
|
||||
// script, possibly followed by other data, and returns the number of bytes it
|
||||
// occupies taking into account the special encoding of the script size by the
|
||||
// domain specific compression algorithm described above.
|
||||
func decodeCompressedScriptSize(serialized []byte) int {
|
||||
scriptSize, bytesRead := deserializeVLQ(serialized)
|
||||
if bytesRead == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
switch scriptSize {
|
||||
case cstPayToPubKeyHash:
|
||||
return 21
|
||||
|
||||
case cstPayToScriptHash:
|
||||
return 21
|
||||
|
||||
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
|
||||
cstPayToPubKeyUncomp5:
|
||||
return 33
|
||||
}
|
||||
|
||||
scriptSize -= numSpecialScripts
|
||||
scriptSize += uint64(bytesRead)
|
||||
return int(scriptSize)
|
||||
}
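
// Illustrative note, not part of the original diff: for the non-standard
// 202-byte script used in the tests below (OP_PUSHDATA1 200 followed by 200
// zero bytes), the stored size is 202 + numSpecialScripts = 208, which
// serializes as the two-byte VLQ [0x80 0x50]. decodeCompressedScriptSize then
// returns 208 - numSpecialScripts + 2 = 204, the total number of bytes the
// compressed script occupies.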
|
||||
|
||||
// putCompressedScript compresses the passed script according to the domain
|
||||
// specific compression algorithm described above directly into the passed
|
||||
// target byte slice. The target byte slice must be at least large enough to
|
||||
// handle the number of bytes returned by the compressedScriptSize function or
|
||||
// it will panic.
|
||||
func putCompressedScript(target, scriptPubKey []byte) int {
|
||||
// Pay-to-pubkey-hash script.
|
||||
if valid, hash := isPubKeyHash(scriptPubKey); valid {
|
||||
target[0] = cstPayToPubKeyHash
|
||||
copy(target[1:21], hash)
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-script-hash script.
|
||||
if valid, hash := isScriptHash(scriptPubKey); valid {
|
||||
target[0] = cstPayToScriptHash
|
||||
copy(target[1:21], hash)
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-pubkey (compressed or uncompressed) script.
|
||||
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
|
||||
pubKeyFormat := serializedPubKey[0]
|
||||
switch pubKeyFormat {
|
||||
case 0x02, 0x03:
|
||||
target[0] = pubKeyFormat
|
||||
copy(target[1:33], serializedPubKey[1:33])
|
||||
return 33
|
||||
case 0x04:
|
||||
// Encode the oddness of the serialized pubkey into the
|
||||
// compressed script type.
|
||||
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
|
||||
copy(target[1:33], serializedPubKey[1:33])
|
||||
return 33
|
||||
}
|
||||
}
|
||||
|
||||
// When none of the above special cases apply, encode the unmodified
|
||||
// script preceded by the sum of its size and the number of special
|
||||
// cases encoded as a variable length quantity.
|
||||
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
|
||||
vlqSizeLen := putVLQ(target, encodedSize)
|
||||
copy(target[vlqSizeLen:], scriptPubKey)
|
||||
return vlqSizeLen + len(scriptPubKey)
|
||||
}
|
||||
|
||||
// decompressScript returns the original script obtained by decompressing the
|
||||
// passed compressed script according to the domain specific compression
|
||||
// algorithm described above.
|
||||
//
|
||||
// NOTE: The script parameter must already have been proven to be long enough
|
||||
// to contain the number of bytes returned by decodeCompressedScriptSize or it
|
||||
// will panic. This is acceptable since it is only an internal function.
|
||||
func decompressScript(compressedScriptPubKey []byte) []byte {
|
||||
// In practice this function will not be called with a zero-length or
|
||||
// nil script since the nil script encoding includes the length, however
|
||||
// the code below assumes the length exists, so just return nil now if
|
||||
// the function ever ends up being called with a nil script in the
|
||||
// future.
|
||||
if len(compressedScriptPubKey) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode the script size and examine it for the special cases.
|
||||
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
|
||||
switch encodedScriptSize {
|
||||
// Pay-to-pubkey-hash script. The resulting script is:
|
||||
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
|
||||
case cstPayToPubKeyHash:
|
||||
scriptPubKey := make([]byte, 25)
|
||||
scriptPubKey[0] = txscript.OpDup
|
||||
scriptPubKey[1] = txscript.OpHash160
|
||||
scriptPubKey[2] = txscript.OpData20
|
||||
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
|
||||
scriptPubKey[23] = txscript.OpEqualVerify
|
||||
scriptPubKey[24] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-script-hash script. The resulting script is:
|
||||
// <OP_HASH160><20 byte script hash><OP_EQUAL>
|
||||
case cstPayToScriptHash:
|
||||
scriptPubKey := make([]byte, 23)
|
||||
scriptPubKey[0] = txscript.OpHash160
|
||||
scriptPubKey[1] = txscript.OpData20
|
||||
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
|
||||
scriptPubKey[22] = txscript.OpEqual
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-compressed-pubkey script. The resulting script is:
|
||||
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
|
||||
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
|
||||
scriptPubKey := make([]byte, 35)
|
||||
scriptPubKey[0] = txscript.OpData33
|
||||
scriptPubKey[1] = byte(encodedScriptSize)
|
||||
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
|
||||
scriptPubKey[34] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-uncompressed-pubkey script. The resulting script is:
|
||||
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
|
||||
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
|
||||
// Change the leading byte to the appropriate compressed pubkey
|
||||
// identifier (0x02 or 0x03) so it can be decoded as a
|
||||
// compressed pubkey. This really should never fail since the
|
||||
// encoding ensures it is valid before compressing to this type.
|
||||
compressedKey := make([]byte, 33)
|
||||
compressedKey[0] = byte(encodedScriptSize - 2)
|
||||
copy(compressedKey[1:], compressedScriptPubKey[1:])
|
||||
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
scriptPubKey := make([]byte, 67)
|
||||
scriptPubKey[0] = txscript.OpData65
|
||||
copy(scriptPubKey[1:], key.SerializeUncompressed())
|
||||
scriptPubKey[66] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
}
|
||||
|
||||
// When none of the special cases apply, the script was encoded using
|
||||
// the general format, so reduce the script size by the number of
|
||||
// special cases and return the unmodified script.
|
||||
scriptSize := int(encodedScriptSize - numSpecialScripts)
|
||||
scriptPubKey := make([]byte, scriptSize)
|
||||
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
|
||||
return scriptPubKey
|
||||
}
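
// Illustrative sketch, not part of the original diff: compressing and
// decompressing a standard pay-to-script-hash script with the helpers above,
// assuming it lives in the same blockdag package. The function name
// exampleScriptCompression and the all-zero script hash are hypothetical.
func exampleScriptCompression() (compressed, roundTripped []byte) {
	// OP_HASH160 <20-byte script hash> OP_EQUAL: 23 bytes uncompressed.
	scriptPubKey := make([]byte, 23)
	scriptPubKey[0] = txscript.OpHash160
	scriptPubKey[1] = txscript.OpData20
	scriptPubKey[22] = txscript.OpEqual

	compressed = make([]byte, compressedScriptSize(scriptPubKey)) // 21 bytes
	putCompressedScript(compressed, scriptPubKey)

	roundTripped = decompressScript(compressed) // the original 23-byte script
	return compressed, roundTripped
}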
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// In order to reduce the size of stored amounts, a domain specific compression
|
||||
// algorithm is used which relies on there typically being a lot of zeroes at
|
||||
// the end of the amounts.
|
||||
//
|
||||
// While this is simply exchanging one uint64 for another, the resulting value
|
||||
// for typical amounts has a much smaller magnitude which results in fewer bytes
|
||||
// when encoded as variable length quantity. For example, consider the amount
|
||||
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
|
||||
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
|
||||
//
|
||||
// Essentially the compression is achieved by splitting the value into an
|
||||
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
|
||||
// and encoding them in a way that can be decoded. More specifically, the
|
||||
// encoding is as follows:
|
||||
// - 0 is 0
|
||||
// - Find the exponent, e, as the largest power of 10 that evenly divides the
|
||||
// value up to a maximum of 9
|
||||
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
|
||||
// dividing the value by 10 (call the result n). The encoded value is thus:
|
||||
// 1 + 10*(9*n + d-1) + e
|
||||
// - When e==9, the only thing known is the amount is not 0. The encoded value
|
||||
// is thus:
|
||||
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
|
||||
//
|
||||
// Example encodings:
|
||||
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
|
||||
// 0 (1) -> 0 (1) * 0.00000000 KAS
|
||||
// 1000 (2) -> 4 (1) * 0.00001000 KAS
|
||||
// 10000 (2) -> 5 (1) * 0.00010000 KAS
|
||||
// 12345678 (4) -> 111111101(4) * 0.12345678 KAS
|
||||
// 50000000 (4) -> 47 (1) * 0.50000000 KAS
|
||||
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
|
||||
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
|
||||
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// compressTxOutAmount compresses the passed amount according to the domain
|
||||
// specific compression algorithm described above.
|
||||
func compressTxOutAmount(amount uint64) uint64 {
|
||||
// No need to do any work if it's zero.
|
||||
if amount == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Find the largest power of 10 (max of 9) that evenly divides the
|
||||
// value.
|
||||
exponent := uint64(0)
|
||||
for amount%10 == 0 && exponent < 9 {
|
||||
amount /= 10
|
||||
exponent++
|
||||
}
|
||||
|
||||
// The compressed result for exponents less than 9 is:
|
||||
// 1 + 10*(9*n + d-1) + e
|
||||
if exponent < 9 {
|
||||
lastDigit := amount % 10
|
||||
amount /= 10
|
||||
return 1 + 10*(9*amount+lastDigit-1) + exponent
|
||||
}
|
||||
|
||||
// The compressed result for an exponent of 9 is:
|
||||
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
|
||||
return 10 + 10*(amount-1)
|
||||
}
|
||||
|
||||
// decompressTxOutAmount returns the original amount the passed compressed
|
||||
// amount represents according to the domain specific compression algorithm
|
||||
// described above.
|
||||
func decompressTxOutAmount(amount uint64) uint64 {
|
||||
// No need to do any work if it's zero.
|
||||
if amount == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// The decompressed amount is either of the following two equations:
|
||||
// x = 1 + 10*(9*n + d - 1) + e
|
||||
// x = 1 + 10*(n - 1) + 9
|
||||
amount--
|
||||
|
||||
// The decompressed amount is now one of the following two equations:
|
||||
// x = 10*(9*n + d - 1) + e
|
||||
// x = 10*(n - 1) + 9
|
||||
exponent := amount % 10
|
||||
amount /= 10
|
||||
|
||||
// The decompressed amount is now one of the following two equations:
|
||||
// x = 9*n + d - 1 | where e < 9
|
||||
// x = n - 1 | where e = 9
|
||||
n := uint64(0)
|
||||
if exponent < 9 {
|
||||
lastDigit := amount%9 + 1
|
||||
amount /= 9
|
||||
n = amount*10 + lastDigit
|
||||
} else {
|
||||
n = amount + 1
|
||||
}
|
||||
|
||||
// Apply the exponent.
|
||||
for ; exponent > 0; exponent-- {
|
||||
n *= 10
|
||||
}
|
||||
|
||||
return n
|
||||
}
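
// Illustrative sketch, not part of the original diff: the worked example from
// the commentary above, expressed with the two helpers. The function name
// exampleAmountCompression is hypothetical.
func exampleAmountCompression() (compressed, roundTripped uint64) {
	// 0.1 KAS = 10,000,000 sompi = 1 * 10^7, so e = 7, d = 1, n = 0 and the
	// compressed value is 1 + 10*(9*0 + 1-1) + 7 = 8, a single VLQ byte.
	compressed = compressTxOutAmount(10000000)

	roundTripped = decompressTxOutAmount(compressed) // 10000000 again
	return compressed, roundTripped
}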
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Compressed transaction outputs consist of an amount and a public key script
|
||||
// both compressed using the domain specific compression algorithms previously
|
||||
// described.
|
||||
//
|
||||
// The serialized format is:
|
||||
//
|
||||
// <compressed amount><compressed script>
|
||||
//
|
||||
// Field Type Size
|
||||
// compressed amount VLQ variable
|
||||
// compressed script []byte variable
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// compressedTxOutSize returns the number of bytes the passed transaction output
|
||||
// fields would take when encoded with the format described above.
|
||||
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
|
||||
return serializeSizeVLQ(compressTxOutAmount(amount)) +
|
||||
compressedScriptSize(scriptPubKey)
|
||||
}
|
||||
|
||||
// putCompressedTxOut compresses the passed amount and script according to their
|
||||
// domain specific compression algorithms and encodes them directly into the
|
||||
// passed target byte slice with the format described above. The target byte
|
||||
// slice must be at least large enough to handle the number of bytes returned by
|
||||
// the compressedTxOutSize function or it will panic.
|
||||
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
|
||||
offset := putVLQ(target, compressTxOutAmount(amount))
|
||||
offset += putCompressedScript(target[offset:], scriptPubKey)
|
||||
return offset
|
||||
}
|
||||
|
||||
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
|
||||
// by other data, into its uncompressed amount and script and returns them along
|
||||
// with the number of bytes they occupied prior to decompression.
|
||||
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
|
||||
// Deserialize the compressed amount and ensure there are bytes
|
||||
// remaining for the compressed script.
|
||||
compressedAmount, bytesRead := deserializeVLQ(serialized)
|
||||
if bytesRead >= len(serialized) {
|
||||
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
|
||||
"data after compressed amount")
|
||||
}
|
||||
|
||||
// Decode the compressed script size and ensure there are enough bytes
|
||||
// left in the slice for it.
|
||||
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
|
||||
if len(serialized[bytesRead:]) < scriptSize {
|
||||
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
|
||||
"data after script size")
|
||||
}
|
||||
|
||||
// Decompress and return the amount and script.
|
||||
amount := decompressTxOutAmount(compressedAmount)
|
||||
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
|
||||
return amount, script, bytesRead + scriptSize, nil
|
||||
}
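
// Illustrative sketch, not part of the original diff: serializing and decoding
// a complete compressed transaction output, assuming it lives in the same
// blockdag package. The function name, the dust amount and the all-zero pubkey
// hash are hypothetical.
func exampleCompressedTxOutRoundTrip() (uint64, []byte, int, error) {
	// Standard pay-to-pubkey-hash script around a dummy 20-byte hash (25 bytes).
	scriptPubKey := make([]byte, 25)
	scriptPubKey[0] = txscript.OpDup
	scriptPubKey[1] = txscript.OpHash160
	scriptPubKey[2] = txscript.OpData20
	scriptPubKey[23] = txscript.OpEqualVerify
	scriptPubKey[24] = txscript.OpCheckSig

	const amount = uint64(546)
	target := make([]byte, compressedTxOutSize(amount, scriptPubKey)) // 23 bytes
	putCompressedTxOut(target, amount, scriptPubKey)

	// Decoding returns 546, the original 25-byte script and 23 bytes read.
	return decodeCompressedTxOut(target)
}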
|
||||
@@ -1,436 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
// is an error. This is only provided for the hard-coded constants so errors in
|
||||
// the source code can be detected. It will only (and must only) be called with
|
||||
// hard-coded values.
|
||||
func hexToBytes(s string) []byte {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic("invalid hex in source file: " + s)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// TestVLQ ensures the variable length quantity serialization, deserialization,
|
||||
// and size calculation works as expected.
|
||||
func TestVLQ(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
val uint64
|
||||
serialized []byte
|
||||
}{
|
||||
{0, hexToBytes("00")},
|
||||
{1, hexToBytes("01")},
|
||||
{127, hexToBytes("7f")},
|
||||
{128, hexToBytes("8000")},
|
||||
{129, hexToBytes("8001")},
|
||||
{255, hexToBytes("807f")},
|
||||
{256, hexToBytes("8100")},
|
||||
{16383, hexToBytes("fe7f")},
|
||||
{16384, hexToBytes("ff00")},
|
||||
{16511, hexToBytes("ff7f")}, // Max 2-byte value
|
||||
{16512, hexToBytes("808000")},
|
||||
{16513, hexToBytes("808001")},
|
||||
{16639, hexToBytes("80807f")},
|
||||
{32895, hexToBytes("80ff7f")},
|
||||
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
|
||||
{2113664, hexToBytes("80808000")},
|
||||
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
|
||||
{270549120, hexToBytes("8080808000")},
|
||||
{2147483647, hexToBytes("86fefefe7f")},
|
||||
{2147483648, hexToBytes("86fefeff00")},
|
||||
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
|
||||
// Max uint64, 10 bytes
|
||||
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the value is calculated properly.
|
||||
gotSize := serializeSizeVLQ(test.val)
|
||||
if gotSize != len(test.serialized) {
|
||||
t.Errorf("serializeSizeVLQ: did not get expected size "+
|
||||
"for %d - got %d, want %d", test.val, gotSize,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the value serializes to the expected bytes.
|
||||
gotBytes := make([]byte, gotSize)
|
||||
gotBytesWritten := putVLQ(gotBytes, test.val)
|
||||
if !bytes.Equal(gotBytes, test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected bytes "+
|
||||
"for %d - got %x, want %x", test.val, gotBytes,
|
||||
test.serialized)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected number "+
|
||||
"of bytes written for %d - got %d, want %d",
|
||||
test.val, gotBytesWritten, len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the serialized bytes deserialize to the expected
|
||||
// value.
|
||||
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
|
||||
if gotVal != test.val {
|
||||
t.Errorf("deserializeVLQ: did not get expected value "+
|
||||
"for %x - got %d, want %d", test.serialized,
|
||||
gotVal, test.val)
|
||||
continue
|
||||
}
|
||||
if gotBytesRead != len(test.serialized) {
|
||||
t.Errorf("deserializeVLQ: did not get expected number "+
|
||||
"of bytes read for %d - got %d, want %d",
|
||||
test.serialized, gotBytesRead,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScriptCompression ensures the domain-specific script compression and
|
||||
// decompression works as expected.
|
||||
func TestScriptCompression(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
uncompressed []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
uncompressed: nil,
|
||||
compressed: hexToBytes("06"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 1",
|
||||
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 2",
|
||||
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
|
||||
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 1",
|
||||
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
|
||||
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 2",
|
||||
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
|
||||
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x02",
|
||||
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
|
||||
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x03",
|
||||
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
|
||||
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 even",
|
||||
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 odd",
|
||||
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
|
||||
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey invalid pubkey",
|
||||
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
},
|
||||
{
|
||||
name: "requires 2 size bytes - data push 200 bytes",
|
||||
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
// [0x80, 0x50] = 208 as a variable length quantity
|
||||
// [0x4c, 0xc8] = OP_PUSHDATA1 200
|
||||
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the value is calculated properly.
|
||||
gotSize := compressedScriptSize(test.uncompressed)
|
||||
if gotSize != len(test.compressed) {
|
||||
t.Errorf("compressedScriptSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
gotSize, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the script compresses to the expected bytes.
|
||||
gotCompressed := make([]byte, gotSize)
|
||||
gotBytesWritten := putCompressedScript(gotCompressed,
|
||||
test.uncompressed)
|
||||
if !bytes.Equal(gotCompressed, test.compressed) {
|
||||
t.Errorf("putCompressedScript (%s): did not get "+
|
||||
"expected bytes - got %x, want %x", test.name,
|
||||
gotCompressed, test.compressed)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.compressed) {
|
||||
t.Errorf("putCompressedScript (%s): did not get "+
|
||||
"expected number of bytes written - got %d, "+
|
||||
"want %d", test.name, gotBytesWritten,
|
||||
len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the compressed script size is properly decoded from
|
||||
// the compressed script.
|
||||
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
|
||||
if gotDecodedSize != len(test.compressed) {
|
||||
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
gotDecodedSize, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the script decompresses to the expected bytes.
|
||||
gotDecompressed := decompressScript(test.compressed)
|
||||
if !bytes.Equal(gotDecompressed, test.uncompressed) {
|
||||
t.Errorf("decompressScript (%s): did not get expected "+
|
||||
"bytes - got %x, want %x", test.name,
|
||||
gotDecompressed, test.uncompressed)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScriptCompressionErrors ensures calling various functions related to
|
||||
// script compression with incorrect data returns the expected results.
|
||||
func TestScriptCompressionErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// A nil script must result in a decoded size of 0.
|
||||
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
|
||||
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
|
||||
"return 0 - got %d", gotSize)
|
||||
}
|
||||
|
||||
// A nil script must result in a nil decompressed script.
|
||||
if gotScript := decompressScript(nil); gotScript != nil {
|
||||
t.Fatalf("decompressScript with nil script did not return nil "+
|
||||
"decompressed script - got %x", gotScript)
|
||||
}
|
||||
|
||||
// A compressed script for a pay-to-pubkey (uncompressed) that results
|
||||
// in an invalid pubkey must result in a nil decompressed script.
|
||||
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
|
||||
"7903c3ebec3a957724895dca52c6b4")
|
||||
if gotScript := decompressScript(compressedScript); gotScript != nil {
|
||||
t.Fatalf("decompressScript with compressed pay-to-"+
|
||||
"uncompressed-pubkey that is invalid did not return "+
|
||||
"nil decompressed script - got %x", gotScript)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAmountCompression ensures the domain-specific transaction output amount
|
||||
// compression and decompression works as expected.
|
||||
func TestAmountCompression(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
uncompressed uint64
|
||||
compressed uint64
|
||||
}{
|
||||
{
|
||||
name: "0 KAS",
|
||||
uncompressed: 0,
|
||||
compressed: 0,
|
||||
},
|
||||
{
|
||||
name: "546 Sompi (current network dust value)",
|
||||
uncompressed: 546,
|
||||
compressed: 4911,
|
||||
},
|
||||
{
|
||||
name: "0.00001 KAS (typical transaction fee)",
|
||||
uncompressed: 1000,
|
||||
compressed: 4,
|
||||
},
|
||||
{
|
||||
name: "0.0001 KAS (typical transaction fee)",
|
||||
uncompressed: 10000,
|
||||
compressed: 5,
|
||||
},
|
||||
{
|
||||
name: "0.12345678 KAS",
|
||||
uncompressed: 12345678,
|
||||
compressed: 111111101,
|
||||
},
|
||||
{
|
||||
name: "0.5 KAS",
|
||||
uncompressed: 50000000,
|
||||
compressed: 48,
|
||||
},
|
||||
{
|
||||
name: "1 KAS",
|
||||
uncompressed: 100000000,
|
||||
compressed: 9,
|
||||
},
|
||||
{
|
||||
name: "5 KAS",
|
||||
uncompressed: 500000000,
|
||||
compressed: 49,
|
||||
},
|
||||
{
|
||||
name: "21000000 KAS (max minted coins)",
|
||||
uncompressed: 2100000000000000,
|
||||
compressed: 21000000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the amount compresses to the expected value.
|
||||
gotCompressed := compressTxOutAmount(test.uncompressed)
|
||||
if gotCompressed != test.compressed {
|
||||
t.Errorf("compressTxOutAmount (%s): did not get "+
|
||||
"expected value - got %d, want %d", test.name,
|
||||
gotCompressed, test.compressed)
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the value decompresses to the expected value.
|
||||
gotDecompressed := decompressTxOutAmount(test.compressed)
|
||||
if gotDecompressed != test.uncompressed {
|
||||
t.Errorf("decompressTxOutAmount (%s): did not get "+
|
||||
"expected value - got %d, want %d", test.name,
|
||||
gotDecompressed, test.uncompressed)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompressedTxOut ensures the transaction output serialization and
|
||||
// deserialization works as expected.
|
||||
func TestCompressedTxOut(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
amount uint64
|
||||
scriptPubKey []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "pay-to-pubkey-hash dust",
|
||||
amount: 546,
|
||||
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 1 KAS",
|
||||
amount: 100000000,
|
||||
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the txout is calculated properly.
|
||||
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
|
||||
if gotSize != len(test.compressed) {
|
||||
t.Errorf("compressedTxOutSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
gotSize, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the txout compresses to the expected value.
|
||||
gotCompressed := make([]byte, gotSize)
|
||||
gotBytesWritten := putCompressedTxOut(gotCompressed,
|
||||
test.amount, test.scriptPubKey)
|
||||
if !bytes.Equal(gotCompressed, test.compressed) {
|
||||
t.Errorf("compressTxOut (%s): did not get expected "+
|
||||
"bytes - got %x, want %x", test.name,
|
||||
gotCompressed, test.compressed)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.compressed) {
|
||||
t.Errorf("compressTxOut (%s): did not get expected "+
|
||||
"number of bytes written - got %d, want %d",
|
||||
test.name, gotBytesWritten,
|
||||
len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the serialized bytes are decoded back to the expected
|
||||
// uncompressed values.
|
||||
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
|
||||
test.compressed)
|
||||
if err != nil {
|
||||
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
|
||||
"error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
if gotAmount != test.amount {
|
||||
t.Errorf("decodeCompressedTxOut (%s): did not get "+
|
||||
"expected amount - got %d, want %d",
|
||||
test.name, gotAmount, test.amount)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(gotScript, test.scriptPubKey) {
|
||||
t.Errorf("decodeCompressedTxOut (%s): did not get "+
|
||||
"expected script - got %x, want %x",
|
||||
test.name, gotScript, test.scriptPubKey)
|
||||
continue
|
||||
}
|
||||
if gotBytesRead != len(test.compressed) {
|
||||
t.Errorf("decodeCompressedTxOut (%s): did not get "+
|
||||
"expected number of bytes read - got %d, want %d",
|
||||
test.name, gotBytesRead, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestTxOutCompressionErrors ensures calling various functions related to
|
||||
// txout compression with incorrect data returns the expected results.
|
||||
func TestTxOutCompressionErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// A compressed txout with missing compressed script must error.
|
||||
compressedTxOut := hexToBytes("00")
|
||||
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
|
||||
if !isDeserializeErr(err) {
|
||||
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
|
||||
"did not return expected error type - got %T, want "+
|
||||
"errDeserialize", err)
|
||||
}
|
||||
|
||||
// A compressed txout with short compressed script must error.
|
||||
compressedTxOut = hexToBytes("0010")
|
||||
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
|
||||
if !isDeserializeErr(err) {
|
||||
t.Fatalf("decodeCompressedTxOut with short compressed script "+
|
||||
"did not return expected error type - got %T, want "+
|
||||
"errDeserialize", err)
|
||||
}
|
||||
}
|
||||
blockdag/dag.go (1001): file diff suppressed because it is too large
@@ -6,19 +6,23 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
func TestBlockCount(t *testing.T) {
|
||||
@@ -40,7 +44,7 @@ func TestBlockCount(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockCount", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockCount", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -93,7 +97,7 @@ func TestIsKnownBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("haveblock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("haveblock", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -204,10 +208,10 @@ func TestIsKnownBlock(t *testing.T) {
|
||||
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
|
||||
|
||||
// Block 3b should be present (as a second child of Block 2).
|
||||
{hash: "264176fb6072e2362db18f92d3f4b739cff071a206736df7c407c0bf9a1d7fef", want: true},
|
||||
{hash: "46314ca17e117b31b467fe1b26fd36c98ee83e750aa5e3b3c1c32870afbe5984", want: true},
|
||||
|
||||
// Block 100000 should be present (as an orphan).
|
||||
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
|
||||
{hash: "732c891529619d43b5aeb3df42ba25dea483a8c0aded1cf585751ebabea28f29", want: true},
|
||||
|
||||
// Random hashes should not be available.
|
||||
{hash: "123", want: false},
|
||||
@@ -251,7 +255,7 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// Create a utxo view with a fake utxo for the inputs used in the
|
||||
// transactions created below. This utxo is added such that it has an
|
||||
// age of 4 blocks.
|
||||
msgTx := wire.NewNativeMsgTx(wire.TxVersion, nil, []*wire.TxOut{{ScriptPubKey: nil, Value: 10}})
|
||||
msgTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, nil, []*domainmessage.TxOut{{ScriptPubKey: nil, Value: 10}})
|
||||
targetTx := util.NewTx(msgTx)
|
||||
utxoSet := NewFullUTXOSet()
|
||||
blueScore := uint64(numBlocksToGenerate) - 4
|
||||
@@ -266,7 +270,7 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// that the sequence lock heights are always calculated from the same
|
||||
// point of view that they were originally calculated from for a given
|
||||
// utxo. That is to say, the height prior to it.
|
||||
utxo := wire.Outpoint{
|
||||
utxo := domainmessage.Outpoint{
|
||||
TxID: *targetTx.ID(),
|
||||
Index: 0,
|
||||
}
|
||||
@@ -275,19 +279,19 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// Obtain the past median time from the PoV of the input created above.
|
||||
// The past median time for the input is the past median time from the PoV
|
||||
// of the block *prior* to the one that included it.
|
||||
medianTime := node.RelativeAncestor(5).PastMedianTime(dag).Unix()
|
||||
medianTime := node.RelativeAncestor(5).PastMedianTime(dag).UnixMilliseconds()
|
||||
|
||||
// The median time calculated from the PoV of the best block in the
|
||||
// test DAG. For unconfirmed inputs, this value will be used since
|
||||
// the MTP will be calculated from the PoV of the yet-to-be-mined
|
||||
// block.
|
||||
nextMedianTime := node.PastMedianTime(dag).Unix()
|
||||
nextMedianTime := node.PastMedianTime(dag).UnixMilliseconds()
|
||||
nextBlockBlueScore := int32(numBlocksToGenerate) + 1
|
||||
|
||||
// Add an additional transaction which will serve as our unconfirmed
|
||||
// output.
|
||||
unConfTx := wire.NewNativeMsgTx(wire.TxVersion, nil, []*wire.TxOut{{ScriptPubKey: nil, Value: 5}})
|
||||
unConfUtxo := wire.Outpoint{
|
||||
unConfTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, nil, []*domainmessage.TxOut{{ScriptPubKey: nil, Value: 5}})
|
||||
unConfUtxo := domainmessage.Outpoint{
|
||||
TxID: *unConfTx.TxID(),
|
||||
Index: 0,
|
||||
}
|
||||
@@ -299,7 +303,7 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tx *wire.MsgTx
|
||||
tx *domainmessage.MsgTx
|
||||
utxoSet UTXOSet
|
||||
mempool bool
|
||||
want *SequenceLock
|
||||
@@ -309,38 +313,37 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// should be disabled.
|
||||
{
|
||||
name: "single input, max sequence number",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: wire.MaxTxInSequenceNum}}, nil),
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: utxo, Sequence: domainmessage.MaxTxInSequenceNum}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: -1,
|
||||
Milliseconds: -1,
|
||||
BlockBlueScore: -1,
|
||||
},
|
||||
},
|
||||
// A transaction with a single input whose lock time is
|
||||
// expressed in seconds. However, the specified lock time is
|
||||
// below the required floor for time based lock times since
|
||||
// they have time granularity of 512 seconds. As a result, the
|
||||
// seconds lock-time should be just before the median time of
|
||||
// they have time granularity of 524288 milliseconds. As a result, the
|
||||
// milliseconds lock-time should be just before the median time of
|
||||
// the targeted block.
|
||||
{
|
||||
name: "single input, seconds lock time below time granularity",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 2)}}, nil),
|
||||
name: "single input, milliseconds lock time below time granularity",
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 2)}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: medianTime - 1,
|
||||
Milliseconds: medianTime - 1,
|
||||
BlockBlueScore: -1,
|
||||
},
|
||||
},
|
||||
// A transaction with a single input whose lock time is
|
||||
// expressed in seconds. The number of seconds should be 1023
|
||||
// seconds after the median past time of the last block in the
|
||||
// chain.
|
||||
// expressed in seconds. The number of seconds should be 1048575
|
||||
// milliseconds after the median past time of the DAG.
|
||||
{
|
||||
name: "single input, 1023 seconds after median time",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 1024)}}, nil),
|
||||
name: "single input, 1048575 milliseconds after median time",
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 1048576)}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: medianTime + 1023,
|
||||
Milliseconds: medianTime + 1048575,
|
||||
BlockBlueScore: -1,
|
||||
},
|
||||
},
|
||||
@@ -352,22 +355,22 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// latest lock that isn't disabled.
|
||||
{
|
||||
name: "multiple varied inputs",
|
||||
tx: wire.NewNativeMsgTx(1,
|
||||
[]*wire.TxIn{{
|
||||
tx: domainmessage.NewNativeMsgTx(1,
|
||||
[]*domainmessage.TxIn{{
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(true, 2560),
|
||||
Sequence: LockTimeToSequence(true, 2621440),
|
||||
}, {
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(false, 4),
|
||||
}, {
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(false, 5) |
|
||||
wire.SequenceLockTimeDisabled,
|
||||
domainmessage.SequenceLockTimeDisabled,
|
||||
}},
|
||||
nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: medianTime + (5 << wire.SequenceLockTimeGranularity) - 1,
|
||||
Milliseconds: medianTime + (5 << domainmessage.SequenceLockTimeGranularity) - 1,
|
||||
BlockBlueScore: int64(prevUtxoBlueScore) + 3,
|
||||
},
|
||||
},
|
||||
@@ -377,10 +380,10 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// height of 2 meaning it can be included at height 3.
|
||||
{
|
||||
name: "single input, lock-time in blocks",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(false, 3)}}, nil),
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(false, 3)}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: -1,
|
||||
Milliseconds: -1,
|
||||
BlockBlueScore: int64(prevUtxoBlueScore) + 2,
|
||||
},
|
||||
},
|
||||
@@ -389,16 +392,16 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// be the time further in the future.
|
||||
{
|
||||
name: "two inputs, lock-times in seconds",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(true, 5120),
|
||||
Sequence: LockTimeToSequence(true, 5242880),
|
||||
}, {
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(true, 2560),
|
||||
Sequence: LockTimeToSequence(true, 2621440),
|
||||
}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: medianTime + (10 << wire.SequenceLockTimeGranularity) - 1,
|
||||
Milliseconds: medianTime + (10 << domainmessage.SequenceLockTimeGranularity) - 1,
|
||||
BlockBlueScore: -1,
|
||||
},
|
||||
},
|
||||
@@ -408,8 +411,8 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// indicating it can be included at height 11.
|
||||
{
|
||||
name: "two inputs, lock-times in blocks",
|
||||
tx: wire.NewNativeMsgTx(1,
|
||||
[]*wire.TxIn{{
|
||||
tx: domainmessage.NewNativeMsgTx(1,
|
||||
[]*domainmessage.TxIn{{
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(false, 1),
|
||||
}, {
|
||||
@@ -419,7 +422,7 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: -1,
|
||||
Milliseconds: -1,
|
||||
BlockBlueScore: int64(prevUtxoBlueScore) + 10,
|
||||
},
|
||||
},
|
||||
@@ -428,13 +431,13 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// further into the future for both inputs should be chosen.
|
||||
{
|
||||
name: "four inputs, two lock-times in time, two lock-times in blocks",
|
||||
tx: wire.NewNativeMsgTx(1,
|
||||
[]*wire.TxIn{{
|
||||
tx: domainmessage.NewNativeMsgTx(1,
|
||||
[]*domainmessage.TxIn{{
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(true, 2560),
|
||||
Sequence: LockTimeToSequence(true, 2621440),
|
||||
}, {
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(true, 6656),
|
||||
Sequence: LockTimeToSequence(true, 6815744),
|
||||
}, {
|
||||
PreviousOutpoint: utxo,
|
||||
Sequence: LockTimeToSequence(false, 3),
|
||||
@@ -445,7 +448,7 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
nil),
|
||||
utxoSet: utxoSet,
|
||||
want: &SequenceLock{
|
||||
Seconds: medianTime + (13 << wire.SequenceLockTimeGranularity) - 1,
|
||||
Milliseconds: medianTime + (13 << domainmessage.SequenceLockTimeGranularity) - 1,
|
||||
BlockBlueScore: int64(prevUtxoBlueScore) + 8,
|
||||
},
|
||||
},
|
||||
@@ -457,11 +460,11 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// after that.
|
||||
{
|
||||
name: "single input, unconfirmed, lock-time in blocks",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(false, 2)}}, nil),
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(false, 2)}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
mempool: true,
|
||||
want: &SequenceLock{
|
||||
Seconds: -1,
|
||||
Milliseconds: -1,
|
||||
BlockBlueScore: int64(nextBlockBlueScore) + 1,
|
||||
},
|
||||
},
|
||||
@@ -469,12 +472,12 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// a time based lock, so the lock time should be based off the
|
||||
// MTP of the *next* block.
|
||||
{
|
||||
name: "single input, unconfirmed, lock-time in seoncds",
|
||||
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(true, 1024)}}, nil),
|
||||
name: "single input, unconfirmed, lock-time in milliseoncds",
|
||||
tx: domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(true, 1048576)}}, nil),
|
||||
utxoSet: utxoSet,
|
||||
mempool: true,
|
||||
want: &SequenceLock{
|
||||
Seconds: nextMedianTime + 1023,
|
||||
Milliseconds: nextMedianTime + 1048575,
|
||||
BlockBlueScore: -1,
|
||||
},
|
||||
},
|
||||
@@ -488,9 +491,9 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
t.Fatalf("test '%s', unable to calc sequence lock: %v", test.name, err)
|
||||
}
|
||||
|
||||
if seqLock.Seconds != test.want.Seconds {
|
||||
t.Fatalf("test '%s' got %v seconds want %v seconds",
|
||||
test.name, seqLock.Seconds, test.want.Seconds)
|
||||
if seqLock.Milliseconds != test.want.Milliseconds {
|
||||
t.Fatalf("test '%s' got %v milliseconds want %v milliseconds",
|
||||
test.name, seqLock.Milliseconds, test.want.Milliseconds)
|
||||
}
|
||||
if seqLock.BlockBlueScore != test.want.BlockBlueScore {
|
||||
t.Fatalf("test '%s' got blue score of %v want blue score of %v ",
|
||||
@@ -516,31 +519,35 @@ func TestCalcPastMedianTime(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
blockNumber uint32
|
||||
expectedSecondsSinceGenesis int64
|
||||
blockNumber uint32
|
||||
expectedMillisecondsSinceGenesis int64
|
||||
}{
|
||||
{
|
||||
blockNumber: 262,
|
||||
expectedSecondsSinceGenesis: 130,
|
||||
blockNumber: 262,
|
||||
expectedMillisecondsSinceGenesis: 130000,
|
||||
},
|
||||
{
|
||||
blockNumber: 270,
|
||||
expectedSecondsSinceGenesis: 138,
|
||||
blockNumber: 270,
|
||||
expectedMillisecondsSinceGenesis: 138000,
|
||||
},
|
||||
{
|
||||
blockNumber: 240,
|
||||
expectedSecondsSinceGenesis: 108,
|
||||
blockNumber: 240,
|
||||
expectedMillisecondsSinceGenesis: 108000,
|
||||
},
|
||||
{
|
||||
blockNumber: 5,
|
||||
expectedSecondsSinceGenesis: 0,
|
||||
blockNumber: 5,
|
||||
expectedMillisecondsSinceGenesis: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
secondsSinceGenesis := nodes[test.blockNumber].PastMedianTime(dag).Unix() - dag.genesis.Header().Timestamp.Unix()
|
||||
if secondsSinceGenesis != test.expectedSecondsSinceGenesis {
|
||||
t.Errorf("TestCalcPastMedianTime: expected past median time of block %v to be %v seconds from genesis but got %v", test.blockNumber, test.expectedSecondsSinceGenesis, secondsSinceGenesis)
|
||||
millisecondsSinceGenesis := nodes[test.blockNumber].PastMedianTime(dag).UnixMilliseconds() -
|
||||
dag.genesis.Header().Timestamp.UnixMilliseconds()
|
||||
|
||||
if millisecondsSinceGenesis != test.expectedMillisecondsSinceGenesis {
|
||||
t.Errorf("TestCalcPastMedianTime: expected past median time of block %v to be %v milliseconds "+
|
||||
"from genesis but got %v",
|
||||
test.blockNumber, test.expectedMillisecondsSinceGenesis, millisecondsSinceGenesis)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -550,19 +557,19 @@ func TestNew(t *testing.T) {
|
||||
|
||||
dbPath := filepath.Join(tempDir, "TestNew")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(testDbType, dbPath, blockDataNet)
|
||||
databaseContext, err := dbaccess.New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
databaseContext.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}()
|
||||
config := &Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
DB: db,
|
||||
TimeSource: NewMedianTime(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
TimeSource: NewTimeSource(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
}
|
||||
_, err = New(config)
|
||||
if err != nil {
|
||||
@@ -590,21 +597,21 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
// Create a test database
|
||||
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(testDbType, dbPath, blockDataNet)
|
||||
databaseContext, err := dbaccess.New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
databaseContext.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}()
|
||||
|
||||
// Create a DAG to add the test block into
|
||||
config := &Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
DB: db,
|
||||
TimeSource: NewMedianTime(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
TimeSource: NewTimeSource(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
}
|
||||
dag, err := New(config)
|
||||
if err != nil {
|
||||
@@ -620,21 +627,38 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
testBlock := blocks[1]
|
||||
|
||||
// Create a test blockNode with an unvalidated status
|
||||
genesisNode := dag.index.LookupNode(genesisBlock.Hash())
|
||||
genesisNode, ok := dag.index.LookupNode(genesisBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("genesis block does not exist in the DAG")
|
||||
}
|
||||
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, blockSetFromSlice(genesisNode))
|
||||
testNode.status = statusDataStored
|
||||
|
||||
// Manually add the test block to the database
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
err := dbStoreBlock(dbTx, testBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dbStoreBlockNode(dbTx, testNode)
|
||||
})
|
||||
dbTx, err := databaseContext.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database "+
|
||||
"transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = storeBlock(dbTx, testBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to store block: %s", err)
|
||||
}
|
||||
dbTestNode, err := serializeBlockNode(testNode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to serialize blockNode: %s", err)
|
||||
}
|
||||
key := blockIndexKey(testNode.hash, testNode.blueScore)
|
||||
err = dbaccess.StoreIndexBlock(dbTx, key, dbTestNode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update block index: %s", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database "+
|
||||
"transaction: %s", err)
|
||||
}
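The sequence above replaces the old db.Update closure: a dbaccess transaction is opened explicitly, rolled back unless it was closed, and committed at the end. A minimal sketch of that pattern collected into one helper (package blockdag context and the DatabaseContext type name are assumptions; storeBlock, serializeBlockNode, blockIndexKey and dbaccess.StoreIndexBlock are the helpers shown above):

// storeTestBlock stores a block and its block-index entry in a single
// explicit dbaccess transaction, mirroring the test code above.
func storeTestBlock(databaseContext *dbaccess.DatabaseContext, block *util.Block, node *blockNode) error {
	dbTx, err := databaseContext.NewTx()
	if err != nil {
		return err
	}
	// If we return before Commit, the transaction is rolled back.
	defer dbTx.RollbackUnlessClosed()

	if err := storeBlock(dbTx, block); err != nil {
		return err
	}
	serializedNode, err := serializeBlockNode(node)
	if err != nil {
		return err
	}
	key := blockIndexKey(node.hash, node.blueScore)
	if err := dbaccess.StoreIndexBlock(dbTx, key, serializedNode); err != nil {
		return err
	}
	return dbTx.Commit()
}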
|
||||
|
||||
// Create a new DAG. We expect this DAG to process the
|
||||
// test node
|
||||
@@ -644,7 +668,11 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make sure that the test node's status is valid
|
||||
testNode = dag.index.LookupNode(testBlock.Hash())
|
||||
testNode, ok = dag.index.LookupNode(testBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", testBlock.Hash())
|
||||
}
|
||||
|
||||
if testNode.status&statusValid == 0 {
|
||||
t.Fatalf("testNode is unexpectedly invalid")
|
||||
}
|
||||
@@ -654,7 +682,7 @@ func TestConfirmations(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestConfirmations", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestConfirmations", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -673,10 +701,10 @@ func TestConfirmations(t *testing.T) {
|
||||
}
|
||||
|
||||
// Add a chain of blocks
|
||||
chainBlocks := make([]*wire.MsgBlock, 5)
|
||||
chainBlocks[0] = dag.dagParams.GenesisBlock
|
||||
chainBlocks := make([]*domainmessage.MsgBlock, 5)
|
||||
chainBlocks[0] = dag.Params.GenesisBlock
|
||||
for i := uint32(1); i < 5; i++ {
|
||||
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
|
||||
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
|
||||
}
|
||||
|
||||
// Make sure that each one of the chain blocks has the expected confirmations number
|
||||
@@ -693,10 +721,10 @@ func TestConfirmations(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
branchingBlocks := make([]*wire.MsgBlock, 2)
|
||||
branchingBlocks := make([]*domainmessage.MsgBlock, 2)
|
||||
// Add two branching blocks
|
||||
branchingBlocks[0] = prepareAndProcessBlock(t, dag, chainBlocks[1])
|
||||
branchingBlocks[1] = prepareAndProcessBlock(t, dag, branchingBlocks[0])
|
||||
branchingBlocks[0] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[1])
|
||||
branchingBlocks[1] = prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingBlocks[0])
|
||||
|
||||
// Check that the genesis has a confirmations number == len(chainBlocks)
|
||||
genesisConfirmations, err = dag.blockConfirmations(dag.genesis)
|
||||
@@ -726,7 +754,7 @@ func TestConfirmations(t *testing.T) {
|
||||
// Generate 100 blocks to force the "main" chain to become red
|
||||
branchingChainTip := branchingBlocks[1]
|
||||
for i := uint32(0); i < 100; i++ {
|
||||
nextBranchingChainTip := prepareAndProcessBlock(t, dag, branchingChainTip)
|
||||
nextBranchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingChainTip)
|
||||
branchingChainTip = nextBranchingChainTip
|
||||
}
|
||||
|
||||
@@ -757,7 +785,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 3
|
||||
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -766,7 +794,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
defer teardownFunc()
|
||||
dag.TestSetCoinbaseMaturity(0)
|
||||
|
||||
acceptingBlockByMsgBlock := func(block *wire.MsgBlock) (*blockNode, error) {
|
||||
acceptingBlockByMsgBlock := func(block *domainmessage.MsgBlock) (*blockNode, error) {
|
||||
node := nodeByMsgBlock(t, dag, block)
|
||||
return dag.acceptingBlock(node)
|
||||
}
|
||||
@@ -782,10 +810,10 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
numChainBlocks := uint32(10)
|
||||
chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
|
||||
chainBlocks[0] = dag.dagParams.GenesisBlock
|
||||
chainBlocks := make([]*domainmessage.MsgBlock, numChainBlocks)
|
||||
chainBlocks[0] = dag.Params.GenesisBlock
|
||||
for i := uint32(1); i <= numChainBlocks-1; i++ {
|
||||
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
|
||||
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
|
||||
}
|
||||
|
||||
// Make sure that each chain block (including the genesis) is accepted by its child
|
||||
@@ -813,7 +841,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
|
||||
// Generate a chain tip that will be in the anticone of the selected tip and
|
||||
// in dag.virtual.blues.
|
||||
branchingChainTip := prepareAndProcessBlock(t, dag, chainBlocks[len(chainBlocks)-3])
|
||||
branchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[len(chainBlocks)-3])
|
||||
|
||||
// Make sure that branchingChainTip is not in the selected parent chain
|
||||
isBranchingChainTipInSelectedParentChain, err := dag.IsInSelectedParentChain(branchingChainTip.BlockHash())
|
||||
@@ -851,7 +879,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
intersectionBlock := chainBlocks[1]
|
||||
sideChainTip := intersectionBlock
|
||||
for i := 0; i < len(chainBlocks)-3; i++ {
|
||||
sideChainTip = prepareAndProcessBlock(t, dag, sideChainTip)
|
||||
sideChainTip = prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip)
|
||||
}
|
||||
|
||||
// Make sure that the accepting block of the parent of the branching block didn't change
|
||||
@@ -867,7 +895,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
|
||||
// Make sure that a block that is found in the red set of the selected tip
|
||||
// doesn't have an accepting block
|
||||
prepareAndProcessBlock(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
|
||||
prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
|
||||
|
||||
sideChainTipAcceptingBlock, err := acceptingBlockByMsgBlock(sideChainTip)
|
||||
if err != nil {
|
||||
@@ -887,7 +915,7 @@ func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
|
||||
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", Config{
|
||||
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -899,13 +927,20 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
blockTime := dag.genesis.Header().Timestamp
|
||||
|
||||
flushUTXODiffStore := func() {
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
return dag.utxoDiffStore.flushToDB(dbTx)
|
||||
})
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
|
||||
}
|
||||
dag.utxoDiffStore.clearDirtyEntries()
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database transaction: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
addNode := func(parent *blockNode) *blockNode {
|
||||
@@ -922,7 +957,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
flushUTXODiffStore()
|
||||
return node
|
||||
}
|
||||
finalityInterval := dag.dagParams.FinalityInterval
|
||||
finalityInterval := dag.FinalityInterval()
|
||||
nodes := make([]*blockNode, 0, finalityInterval)
|
||||
currentNode := dag.genesis
|
||||
nodes = append(nodes, currentNode)
|
||||
@@ -934,6 +969,11 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
// Manually set the last finality point
|
||||
dag.lastFinalityPoint = nodes[finalityInterval-1]
|
||||
|
||||
// Don't unload diffData
|
||||
currentDifference := maxBlueScoreDifferenceToKeepLoaded
|
||||
maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64
|
||||
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
|
||||
|
||||
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
|
||||
flushUTXODiffStore()
|
||||
|
||||
@@ -941,17 +981,27 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
if !node.isFinalized {
|
||||
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
|
||||
}
|
||||
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; deleteDiffData && ok {
|
||||
if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok {
|
||||
t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
} else if !deleteDiffData && !ok {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
}
|
||||
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
|
||||
|
||||
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
|
||||
exists := !dbaccess.IsNotFoundError(err)
|
||||
if exists && err != nil {
|
||||
t.Errorf("diffDataFromDB: %s", err)
|
||||
} else if deleteDiffData && diffData != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if deleteDiffData && exists {
|
||||
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
} else if !deleteDiffData && diffData == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !deleteDiffData && !exists {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -959,7 +1009,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
if node.isFinalized {
|
||||
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
|
||||
}
|
||||
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
|
||||
if _, ok := dag.utxoDiffStore.loaded[node]; !ok {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore)
|
||||
}
|
||||
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
|
||||
@@ -972,7 +1022,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
|
||||
func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -980,11 +1030,11 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
invalidCbTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{}, []*wire.TxOut{}, subnetworkid.SubnetworkIDCoinbase, 0, []byte{})
|
||||
invalidCbTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{}, []*domainmessage.TxOut{}, subnetworkid.SubnetworkIDCoinbase, 0, []byte{})
|
||||
txs := []*util.Tx{util.NewTx(invalidCbTx)}
|
||||
hashMerkleRoot := BuildHashMerkleTreeStore(txs).Root()
|
||||
invalidMsgBlock := wire.NewMsgBlock(
|
||||
wire.NewBlockHeader(
|
||||
invalidMsgBlock := domainmessage.NewMsgBlock(
|
||||
domainmessage.NewBlockHeader(
|
||||
1,
|
||||
[]*daghash.Hash{params.GenesisHash}, hashMerkleRoot,
|
||||
&daghash.Hash{},
|
||||
@@ -1008,16 +1058,16 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
"is an orphan\n")
|
||||
}
|
||||
|
||||
invalidBlockNode := dag.index.LookupNode(invalidBlock.Hash())
|
||||
if invalidBlockNode == nil {
|
||||
invalidBlockNode, ok := dag.index.LookupNode(invalidBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockNode wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockNode.status&statusValidateFailed != statusValidateFailed {
|
||||
t.Fatalf("invalidBlockNode status to have %b flags raised (got: %b)", statusValidateFailed, invalidBlockNode.status)
|
||||
}
|
||||
|
||||
invalidMsgBlockChild := wire.NewMsgBlock(
|
||||
wire.NewBlockHeader(1, []*daghash.Hash{
|
||||
invalidMsgBlockChild := domainmessage.NewMsgBlock(
|
||||
domainmessage.NewBlockHeader(1, []*daghash.Hash{
|
||||
invalidBlock.Hash(),
|
||||
}, hashMerkleRoot, &daghash.Hash{}, &daghash.Hash{}, dag.genesis.bits, 0),
|
||||
)
|
||||
@@ -1037,16 +1087,16 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
t.Fatalf("ProcessBlock incorrectly returned invalidBlockChild " +
|
||||
"is an orphan\n")
|
||||
}
|
||||
invalidBlockChildNode := dag.index.LookupNode(invalidBlockChild.Hash())
|
||||
if invalidBlockChildNode == nil {
|
||||
invalidBlockChildNode, ok := dag.index.LookupNode(invalidBlockChild.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockChild wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
|
||||
t.Fatalf("invalidBlockNode status to have %b flags raised (got %b)", statusInvalidAncestor, invalidBlockChildNode.status)
|
||||
}
|
||||
|
||||
invalidMsgBlockGrandChild := wire.NewMsgBlock(
|
||||
wire.NewBlockHeader(1, []*daghash.Hash{
|
||||
invalidMsgBlockGrandChild := domainmessage.NewMsgBlock(
|
||||
domainmessage.NewBlockHeader(1, []*daghash.Hash{
|
||||
invalidBlockChild.Hash(),
|
||||
}, hashMerkleRoot, &daghash.Hash{}, &daghash.Hash{}, dag.genesis.bits, 0),
|
||||
)
|
||||
@@ -1065,8 +1115,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
t.Fatalf("ProcessBlock incorrectly returned invalidBlockGrandChild " +
|
||||
"is an orphan\n")
|
||||
}
|
||||
invalidBlockGrandChildNode := dag.index.LookupNode(invalidBlockGrandChild.Hash())
|
||||
if invalidBlockGrandChildNode == nil {
|
||||
invalidBlockGrandChildNode, ok := dag.index.LookupNode(invalidBlockGrandChild.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockGrandChild wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockGrandChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
|
||||
@@ -1083,8 +1133,282 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) {
|
||||
&dagconfig.SimnetParams,
|
||||
}
|
||||
for _, params := range netParams {
|
||||
if params.TargetTimePerBlock*time.Duration(params.FinalityInterval) < isDAGCurrentMaxDiff {
|
||||
if params.FinalityDuration < isDAGCurrentMaxDiff*params.TargetTimePerBlock {
|
||||
t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *domainmessage.MsgBlock, expectedRuleErr error) {
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
|
||||
|
||||
err = checkRuleError(err, expectedRuleErr)
|
||||
if err != nil {
|
||||
t.Errorf("checkRuleError: %s", err)
|
||||
}
|
||||
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: block " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("ProcessBlock: block got unexpectedly orphaned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoubleSpends(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestDoubleSpends", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup dag instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
fundingBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{params.GenesisHash}, nil)
|
||||
cbTx := fundingBlock.Transactions[0]
|
||||
|
||||
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
tx1 := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut})
|
||||
|
||||
doubleSpendTxOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: OpTrueScript,
|
||||
Value: uint64(2),
|
||||
}
|
||||
doubleSpendTx1 := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{doubleSpendTxOut})
|
||||
|
||||
blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*domainmessage.MsgTx{tx1})
|
||||
|
||||
// Check that a block will be rejected if it has a transaction that already exists in its past.
|
||||
anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add tx1.
|
||||
anotherBlockWithTx1.Transactions = append(anotherBlockWithTx1.Transactions, tx1)
|
||||
anotherBlockWithTx1UtilTxs := make([]*util.Tx, len(anotherBlockWithTx1.Transactions))
|
||||
for i, tx := range anotherBlockWithTx1.Transactions {
|
||||
anotherBlockWithTx1UtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, ""))
|
||||
|
||||
// Check that a block will be rejected if it has a transaction that double spends
|
||||
// a transaction from its past.
|
||||
blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add a transaction that double spends the block past.
|
||||
blockWithDoubleSpendForTx1.Transactions = append(blockWithDoubleSpendForTx1.Transactions, doubleSpendTx1)
|
||||
blockWithDoubleSpendForTx1UtilTxs := make([]*util.Tx, len(blockWithDoubleSpendForTx1.Transactions))
|
||||
for i, tx := range blockWithDoubleSpendForTx1.Transactions {
|
||||
blockWithDoubleSpendForTx1UtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, ""))
|
||||
|
||||
blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*domainmessage.MsgTx{doubleSpendTx1})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Check that a block will not get rejected if it has a transaction that double spends
|
||||
// a transaction from its anticone.
|
||||
testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil)
|
||||
|
||||
// Check that a block will be rejected if it has two transactions that spend the same UTXO.
|
||||
blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add tx1 and doubleSpendTx1.
|
||||
blockWithDoubleSpendWithItself.Transactions = append(blockWithDoubleSpendWithItself.Transactions, tx1, doubleSpendTx1)
|
||||
blockWithDoubleSpendWithItselfUtilTxs := make([]*util.Tx, len(blockWithDoubleSpendWithItself.Transactions))
|
||||
for i, tx := range blockWithDoubleSpendWithItself.Transactions {
|
||||
blockWithDoubleSpendWithItselfUtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
|
||||
|
||||
// Check that a block will be rejected if it has the same transaction twice.
|
||||
blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add tx1 twice.
|
||||
blockWithDuplicateTransaction.Transactions = append(blockWithDuplicateTransaction.Transactions, tx1, tx1)
|
||||
blockWithDuplicateTransactionUtilTxs := make([]*util.Tx, len(blockWithDuplicateTransaction.Transactions))
|
||||
for i, tx := range blockWithDuplicateTransaction.Transactions {
|
||||
blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root()
|
||||
testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
|
||||
}
|
||||
|
||||
func TestUTXOCommitment(t *testing.T) {
|
||||
// Create a new database and dag instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: Failed to setup dag instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
resetExtraNonceForTest()
|
||||
|
||||
createTx := func(txToSpend *domainmessage.MsgTx) *domainmessage.MsgTx {
|
||||
scriptPubKey, err := txscript.PayToScriptHashScript(OpTrueScript)
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: failed to build script pub key: %s", err)
|
||||
}
|
||||
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *txToSpend.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
return domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut})
|
||||
}
|
||||
|
||||
// Build the following DAG:
|
||||
// G <- A <- B <- D
|
||||
// <- C <-
|
||||
genesis := params.GenesisBlock
|
||||
|
||||
// Block A:
|
||||
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
|
||||
|
||||
// Block B:
|
||||
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
|
||||
|
||||
// Block C:
|
||||
txSpendBlockACoinbase := createTx(blockA.Transactions[0])
|
||||
blockCTxs := []*domainmessage.MsgTx{txSpendBlockACoinbase}
|
||||
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, blockCTxs)
|
||||
|
||||
// Block D:
|
||||
txSpendTxInBlockC := createTx(txSpendBlockACoinbase)
|
||||
blockDTxs := []*domainmessage.MsgTx{txSpendTxInBlockC}
|
||||
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs)
|
||||
|
||||
// Get the pastUTXO of blockD
|
||||
blockNodeD, ok := dag.index.LookupNode(blockD.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestUTXOCommitment: blockNode for block D not found")
|
||||
}
|
||||
blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD)
|
||||
blockDPastDiffUTXOSet := blockDPastUTXO.(*DiffUTXOSet)
|
||||
|
||||
// Build a Multiset for block D
|
||||
multiset := secp256k1.NewMultiset()
|
||||
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection {
|
||||
var err error
|
||||
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
|
||||
}
|
||||
}
|
||||
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toAdd {
|
||||
var err error
|
||||
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
|
||||
}
|
||||
}
|
||||
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toRemove {
|
||||
var err error
|
||||
multiset, err = removeUTXOFromMultiset(multiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("TestUTXOCommitment: removeUTXOFromMultiset unexpectedly failed")
|
||||
}
|
||||
}
|
||||
|
||||
// Turn the multiset into a UTXO commitment
|
||||
utxoCommitment := daghash.Hash(*multiset.Finalize())
|
||||
|
||||
// Make sure that the two commitments are equal
|
||||
if !utxoCommitment.IsEqual(blockNodeD.utxoCommitment) {
|
||||
t.Fatalf("TestUTXOCommitment: calculated UTXO commitment and "+
|
||||
"actual UTXO commitment don't match. Want: %s, got: %s",
|
||||
utxoCommitment, blockNodeD.utxoCommitment)
|
||||
}
|
||||
}
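In short, the test recomputes block D's commitment as the finalized multiset of the base UTXO collection, plus the diff's toAdd entries, minus its toRemove entries, and compares the result against blockNodeD.utxoCommitment. A condensed sketch of that recomputation (a package-internal helper using the same fields and functions as the test above, not part of the kaspad API):

// calcUTXOCommitment folds a DiffUTXOSet into a single commitment hash.
func calcUTXOCommitment(diffSet *DiffUTXOSet) (*daghash.Hash, error) {
	ms := secp256k1.NewMultiset()
	var err error
	for outpoint, entry := range diffSet.base.utxoCollection {
		if ms, err = addUTXOToMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	for outpoint, entry := range diffSet.UTXODiff.toAdd {
		if ms, err = addUTXOToMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	for outpoint, entry := range diffSet.UTXODiff.toRemove {
		if ms, err = removeUTXOFromMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	commitment := daghash.Hash(*ms.Finalize())
	return &commitment, nil
}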
|
||||
|
||||
func TestPastUTXOMultiSet(t *testing.T) {
|
||||
// Create a new database and dag instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestPastUTXOMultiSet", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestPastUTXOMultiSet: Failed to setup dag instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Build a short chain
|
||||
genesis := params.GenesisBlock
|
||||
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
|
||||
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
|
||||
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil)
|
||||
|
||||
// Take blockC's selectedParentMultiset
|
||||
blockNodeC, ok := dag.index.LookupNode(blockC.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found")
|
||||
}
|
||||
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag)
|
||||
if err != nil {
|
||||
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Copy the multiset
|
||||
blockCSelectedParentMultisetCopy := *blockCSelectedParentMultiset
|
||||
blockCSelectedParentMultiset = &blockCSelectedParentMultisetCopy
|
||||
|
||||
// Add a block on top of blockC
|
||||
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil)
|
||||
|
||||
// Get blockC's selectedParentMultiset again
|
||||
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag)
|
||||
if err != nil {
|
||||
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that blockC's selectedParentMultiset had not changed
|
||||
if !reflect.DeepEqual(blockCSelectedParentMultiset, blockCSelectedParentMultiSetAfterAnotherBlock) {
|
||||
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed")
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -6,39 +6,51 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"github.com/pkg/errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// TestErrNotInDAG ensures the functions related to errNotInDAG work
|
||||
// TestErrNotInDAG ensures the functions related to ErrNotInDAG work
|
||||
// as expected.
|
||||
func TestErrNotInDAG(t *testing.T) {
|
||||
errStr := "no block at height 1 exists"
|
||||
err := error(errNotInDAG(errStr))
|
||||
err := error(ErrNotInDAG(errStr))
|
||||
|
||||
// Ensure the stringized output for the error is as expected.
|
||||
if err.Error() != errStr {
|
||||
t.Fatalf("errNotInDAG retuned unexpected error string - "+
|
||||
t.Fatalf("ErrNotInDAG retuned unexpected error string - "+
|
||||
"got %q, want %q", err.Error(), errStr)
|
||||
}
|
||||
|
||||
// Ensure error is detected as the correct type.
|
||||
if !isNotInDAGErr(err) {
|
||||
t.Fatalf("isNotInDAGErr did not detect as expected type")
|
||||
if !IsNotInDAGErr(err) {
|
||||
t.Fatalf("IsNotInDAGErr did not detect as expected type")
|
||||
}
|
||||
err = errors.New("something else")
|
||||
if isNotInDAGErr(err) {
|
||||
t.Fatalf("isNotInDAGErr detected incorrect type")
|
||||
if IsNotInDAGErr(err) {
|
||||
t.Fatalf("IsNotInDAGErr detected incorrect type")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUtxoSerialization ensures serializing and deserializing unspent
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
// is an error. This is only provided for the hard-coded constants so errors in
|
||||
// the source code can be detected. It will only (and must only) be called with
|
||||
// hard-coded values.
|
||||
func hexToBytes(s string) []byte {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic("invalid hex in source file: " + s)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// TestUTXOSerialization ensures serializing and deserializing unspent
|
||||
// transaction output entries works as expected.
|
||||
func TestUtxoSerialization(t *testing.T) {
|
||||
func TestUTXOSerialization(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
@@ -54,7 +66,7 @@ func TestUtxoSerialization(t *testing.T) {
|
||||
blockBlueScore: 1,
|
||||
packedFlags: tfCoinbase,
|
||||
},
|
||||
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
|
||||
serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
|
||||
},
|
||||
{
|
||||
name: "blue score 100001, not coinbase",
|
||||
@@ -64,13 +76,21 @@ func TestUtxoSerialization(t *testing.T) {
|
||||
blockBlueScore: 100001,
|
||||
packedFlags: 0,
|
||||
},
|
||||
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
|
||||
serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
// Ensure the utxo entry serializes to the expected value.
|
||||
gotBytes := serializeUTXOEntry(test.entry)
|
||||
w := &bytes.Buffer{}
|
||||
err := serializeUTXOEntry(w, test.entry)
|
||||
if err != nil {
|
||||
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
|
||||
"error: %v", i, test.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
gotBytes := w.Bytes()
|
||||
if !bytes.Equal(gotBytes, test.serialized) {
|
||||
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
|
||||
"bytes - got %x, want %x", i, test.name,
|
||||
@@ -78,8 +98,8 @@ func TestUtxoSerialization(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Deserialize to a utxo entry.
|
||||
utxoEntry, err := deserializeUTXOEntry(test.serialized)
|
||||
// Deserialize to a utxo entry.
|
||||
utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
|
||||
if err != nil {
|
||||
t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
|
||||
"error: %v", i, test.name, err)
|
||||
@@ -124,28 +144,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
serialized []byte
|
||||
errType error
|
||||
}{
|
||||
{
|
||||
name: "no data after header code",
|
||||
serialized: hexToBytes("02"),
|
||||
errType: errDeserialize(""),
|
||||
},
|
||||
{
|
||||
name: "incomplete compressed txout",
|
||||
serialized: hexToBytes("0232"),
|
||||
errType: errDeserialize(""),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the expected error type is returned and the returned
|
||||
// entry is nil.
|
||||
entry, err := deserializeUTXOEntry(test.serialized)
|
||||
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
|
||||
t.Errorf("deserializeUTXOEntry (%s): expected error "+
|
||||
"type does not match - got %T, want %T",
|
||||
test.name, err, test.errType)
|
||||
entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
|
||||
if err == nil {
|
||||
t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
|
||||
test.name)
|
||||
continue
|
||||
}
|
||||
if entry != nil {
|
||||
@@ -172,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
|
||||
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
},
|
||||
{
|
||||
name: "block 1",
|
||||
@@ -180,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
|
||||
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -217,51 +233,6 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestDAGStateDeserializeErrors performs negative tests against
|
||||
// deserializing the DAG state to ensure error paths work as expected.
|
||||
func TestDAGStateDeserializeErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
serialized []byte
|
||||
errType error
|
||||
}{
|
||||
{
|
||||
name: "nothing serialized",
|
||||
serialized: hexToBytes(""),
|
||||
errType: database.Error{ErrorCode: database.ErrCorruption},
|
||||
},
|
||||
{
|
||||
name: "corrupted data",
|
||||
serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
|
||||
errType: database.Error{ErrorCode: database.ErrCorruption},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the expected error type and code is returned.
|
||||
_, err := deserializeDAGState(test.serialized)
|
||||
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
|
||||
t.Errorf("deserializeDAGState (%s): expected "+
|
||||
"error type does not match - got %T, want %T",
|
||||
test.name, err, test.errType)
|
||||
continue
|
||||
}
|
||||
var dbErr database.Error
|
||||
if ok := errors.As(err, &dbErr); ok {
|
||||
tderr := test.errType.(database.Error)
|
||||
if dbErr.ErrorCode != tderr.ErrorCode {
|
||||
t.Errorf("deserializeDAGState (%s): "+
|
||||
"wrong error code got: %v, want: %v",
|
||||
test.name, dbErr.ErrorCode,
|
||||
tderr.ErrorCode)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newHashFromStr converts the passed big-endian hex string into a
|
||||
// daghash.Hash. It only differs from the one available in daghash in that
|
||||
// it panics in case of an error since it will only (and must only) be
|
||||
|
||||
@@ -5,17 +5,17 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"time"
|
||||
"github.com/kaspanet/kaspad/util/bigintpool"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// requiredDifficulty calculates the required difficulty for a
|
||||
// block given its bluest parent.
|
||||
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
|
||||
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime mstime.Time) uint32 {
|
||||
// Genesis block.
|
||||
if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
|
||||
if dag.Params.DisableDifficultyAdjustment || bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
|
||||
return dag.powMaxBits
|
||||
}
|
||||
|
||||
@@ -30,12 +30,21 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
|
||||
// averageWindowTarget * ((windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
|
||||
// The result uses integer division which means it will be slightly
|
||||
// rounded down.
|
||||
newTarget := targetsWindow.averageTarget()
|
||||
newTarget := bigintpool.Acquire(0)
|
||||
defer bigintpool.Release(newTarget)
|
||||
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
|
||||
defer bigintpool.Release(windowTimeStampDifference)
|
||||
targetTimePerBlock := bigintpool.Acquire(dag.Params.TargetTimePerBlock.Milliseconds())
|
||||
defer bigintpool.Release(targetTimePerBlock)
|
||||
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
|
||||
defer bigintpool.Release(difficultyAdjustmentWindowSize)
|
||||
|
||||
targetsWindow.averageTarget(newTarget)
|
||||
newTarget.
|
||||
Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
|
||||
Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
|
||||
Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
|
||||
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
|
||||
Mul(newTarget, windowTimeStampDifference).
|
||||
Div(newTarget, targetTimePerBlock).
|
||||
Div(newTarget, difficultyAdjustmentWindowSize)
|
||||
if newTarget.Cmp(dag.Params.PowMax) > 0 {
|
||||
return dag.powMaxBits
|
||||
}
|
||||
newTargetBits := util.BigToCompact(newTarget)
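Put differently, the refactored code computes newTarget = averageWindowTarget * (windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize), clamps it at PowMax, and converts it to compact form; the bigintpool acquire/release calls only recycle big.Int allocations. A plain big.Int sketch of the same arithmetic, with all inputs passed in explicitly (names are illustrative, not kaspad API):

// retarget computes the compact-form difficulty target from the window average.
func retarget(averageWindowTarget, powMax *big.Int, windowMaxTimeStamp, windowMinTimestamp,
	targetTimePerBlockMillis, windowSize int64, powMaxBits uint32) uint32 {

	newTarget := new(big.Int).Set(averageWindowTarget)
	newTarget.Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp))
	newTarget.Div(newTarget, big.NewInt(targetTimePerBlockMillis))
	newTarget.Div(newTarget, big.NewInt(windowSize))
	if newTarget.Cmp(powMax) > 0 {
		return powMaxBits
	}
	return util.BigToCompact(newTarget)
}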
|
||||
@@ -46,7 +55,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
|
||||
// be built on top of the current tips.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
|
||||
func (dag *BlockDAG) NextRequiredDifficulty(timestamp mstime.Time) uint32 {
|
||||
difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
|
||||
return difficulty
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -79,10 +80,10 @@ func TestCalcWork(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDifficulty(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params := dagconfig.MainnetParams
|
||||
params.K = 1
|
||||
params.DifficultyAdjustmentWindowSize = 264
|
||||
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -90,11 +91,12 @@ func TestDifficulty(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
zeroTime := time.Unix(0, 0)
|
||||
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
|
||||
zeroTime := mstime.Time{}
|
||||
addNode := func(parents blockSet, blockTime mstime.Time) *blockNode {
|
||||
bluestParent := parents.bluest()
|
||||
if blockTime == zeroTime {
|
||||
blockTime = time.Unix(bluestParent.timestamp+1, 0)
|
||||
if blockTime.IsZero() {
|
||||
blockTime = bluestParent.time()
|
||||
blockTime = blockTime.Add(params.TargetTimePerBlock)
|
||||
}
|
||||
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
|
||||
if err != nil {
|
||||
@@ -113,13 +115,18 @@ func TestDifficulty(t *testing.T) {
|
||||
if isOrphan {
|
||||
t.Fatalf("block was unexpectedly orphan")
|
||||
}
|
||||
return dag.index.LookupNode(block.BlockHash())
|
||||
node, ok := dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", block.BlockHash())
|
||||
}
|
||||
return node
|
||||
}
|
||||
tip := dag.genesis
|
||||
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
|
||||
tip = addNode(blockSetFromSlice(tip), zeroTime)
|
||||
if tip.bits != dag.genesis.bits {
|
||||
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
|
||||
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
|
||||
"window size, the difficulty should be the same as genesis'")
|
||||
}
|
||||
}
|
||||
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
|
||||
@@ -140,7 +147,8 @@ func TestDifficulty(t *testing.T) {
|
||||
}
|
||||
tip = addNode(blockSetFromSlice(tip), zeroTime)
|
||||
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
|
||||
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
|
||||
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
|
||||
"block rate, so the difficulty should increase as well")
|
||||
}
|
||||
expectedBits := uint32(0x207f83df)
|
||||
if tip.bits != expectedBits {
|
||||
@@ -167,7 +175,9 @@ func TestDifficulty(t *testing.T) {
|
||||
sameBitsCount = 0
|
||||
}
|
||||
}
|
||||
slowNode := addNode(blockSetFromSlice(tip), time.Unix(tip.timestamp+2, 0))
|
||||
slowBlockTime := tip.time()
|
||||
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
|
||||
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
|
||||
if slowNode.bits != tip.bits {
|
||||
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
|
||||
}
|
||||
@@ -180,7 +190,8 @@ func TestDifficulty(t *testing.T) {
|
||||
}
|
||||
tip = addNode(blockSetFromSlice(tip), zeroTime)
|
||||
if compareBits(tip.bits, slowNode.bits) <= 0 {
|
||||
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
|
||||
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
|
||||
" rate, so the difficulty should decrease as well")
|
||||
}
|
||||
|
||||
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
|
||||
@@ -197,7 +208,8 @@ func TestDifficulty(t *testing.T) {
|
||||
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
|
||||
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
|
||||
if tipWithoutRedPast.bits != tipWithRedPast.bits {
|
||||
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
|
||||
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
|
||||
" shouldn't affect the difficulty")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,28 +6,10 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DeploymentError identifies an error that indicates a deployment ID was
|
||||
// specified that does not exist.
|
||||
type DeploymentError uint32
|
||||
|
||||
// Error returns the assertion error as a human-readable string and satisfies
|
||||
// the error interface.
|
||||
func (e DeploymentError) Error() string {
|
||||
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
|
||||
}
|
||||
|
||||
// AssertError identifies an error that indicates an internal code consistency
|
||||
// issue and should be treated as a critical and unrecoverable error.
|
||||
type AssertError string
|
||||
|
||||
// Error returns the assertion error as a human-readable string and satisfies
|
||||
// the error interface.
|
||||
func (e AssertError) Error() string {
|
||||
return "assertion failed: " + string(e)
|
||||
}
|
||||
|
||||
// ErrorCode identifies a kind of error.
|
||||
type ErrorCode int
|
||||
|
||||
@@ -46,11 +28,6 @@ const (
|
||||
// to a newer version.
|
||||
ErrBlockVersionTooOld
|
||||
|
||||
// ErrInvalidTime indicates the time in the passed block has a precision
|
||||
// that is more than one second. The DAG consensus rules require
|
||||
// timestamps to have a maximum precision of one second.
|
||||
ErrInvalidTime
|
||||
|
||||
// ErrTimeTooOld indicates the time is before the median time of
// the last several blocks per the DAG consensus rules.
|
||||
ErrTimeTooOld
|
||||
@@ -87,6 +64,9 @@ const (
|
||||
// the expected value.
|
||||
ErrBadUTXOCommitment
|
||||
|
||||
// ErrInvalidSubnetwork indicates the subnetwork is not allowed.
|
||||
ErrInvalidSubnetwork
|
||||
|
||||
// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
|
||||
// last finality point.
|
||||
ErrFinalityPointTimeTooOld
|
||||
@@ -121,6 +101,11 @@ const (
|
||||
// either does not exist or has already been spent.
|
||||
ErrMissingTxOut
|
||||
|
||||
// ErrDoubleSpendInSameBlock indicates a transaction
|
||||
// that spends an output that was already spent by another
|
||||
// transaction in the same block.
|
||||
ErrDoubleSpendInSameBlock
|
||||
|
||||
// ErrUnfinalizedTx indicates a transaction has not been finalized.
|
||||
// A valid block may only contain finalized transactions.
|
||||
ErrUnfinalizedTx
|
||||
@@ -221,6 +206,10 @@ const (
|
||||
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
|
||||
// submitted with BFDisallowDelay flag raised.
|
||||
ErrDelayedBlockIsNotAllowed
|
||||
|
||||
// ErrOrphanBlockIsNotAllowed indicates that an orphan block was submitted with
|
||||
// BFDisallowOrphans flag raised.
|
||||
ErrOrphanBlockIsNotAllowed
|
||||
)
|
||||
|
||||
// Map of ErrorCode values back to their constant names for pretty printing.
|
||||
@@ -228,7 +217,6 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrDuplicateBlock: "ErrDuplicateBlock",
|
||||
ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
|
||||
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
|
||||
ErrInvalidTime: "ErrInvalidTime",
|
||||
ErrTimeTooOld: "ErrTimeTooOld",
|
||||
ErrTimeTooNew: "ErrTimeTooNew",
|
||||
ErrNoParents: "ErrNoParents",
|
||||
@@ -245,6 +233,7 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
|
||||
ErrBadTxInput: "ErrBadTxInput",
|
||||
ErrMissingTxOut: "ErrMissingTxOut",
|
||||
ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
|
||||
ErrUnfinalizedTx: "ErrUnfinalizedTx",
|
||||
ErrDuplicateTx: "ErrDuplicateTx",
|
||||
ErrOverwriteTx: "ErrOverwriteTx",
|
||||
@@ -269,6 +258,7 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
|
||||
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
|
||||
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
|
||||
ErrOrphanBlockIsNotAllowed: "ErrOrphanBlockIsNotAllowed",
|
||||
}
|
||||
|
||||
// String returns the ErrorCode as a human-readable name.
|
||||
@@ -294,7 +284,10 @@ func (e RuleError) Error() string {
|
||||
return e.Description
|
||||
}
|
||||
|
||||
// ruleError creates a RuleError given a set of arguments.
|
||||
func ruleError(c ErrorCode, desc string) RuleError {
|
||||
return RuleError{ErrorCode: c, Description: desc}
|
||||
func ruleError(c ErrorCode, desc string) error {
|
||||
return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
|
||||
}
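Because ruleError now wraps the RuleError with a stack trace, callers that inspect the error code should unwrap it with errors.As (the same pattern the DAG-state deserialization test uses for database.Error) rather than a plain type assertion. A minimal sketch:

// extractRuleErrorCode unwraps err and reports its ErrorCode, if it is a RuleError.
func extractRuleErrorCode(err error) (ErrorCode, bool) {
	var ruleErr RuleError
	if errors.As(err, &ruleErr) {
		return ruleErr.ErrorCode, true
	}
	return 0, false
}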
|
||||
|
||||
// ErrInvalidParameter signifies that an invalid parameter has been
|
||||
// supplied to one of the BlockDAG functions.
|
||||
var ErrInvalidParameter = errors.New("invalid parameter")
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -18,7 +17,6 @@ func TestErrorCodeStringer(t *testing.T) {
|
||||
{ErrDuplicateBlock, "ErrDuplicateBlock"},
|
||||
{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
|
||||
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
|
||||
{ErrInvalidTime, "ErrInvalidTime"},
|
||||
{ErrTimeTooOld, "ErrTimeTooOld"},
|
||||
{ErrTimeTooNew, "ErrTimeTooNew"},
|
||||
{ErrNoParents, "ErrNoParents"},
|
||||
@@ -59,6 +57,7 @@ func TestErrorCodeStringer(t *testing.T) {
|
||||
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
|
||||
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
|
||||
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
|
||||
{ErrOrphanBlockIsNotAllowed, "ErrOrphanBlockIsNotAllowed"},
|
||||
{0xffff, "Unknown ErrorCode (65535)"},
|
||||
}
|
||||
|
||||
@@ -99,46 +98,3 @@ func TestRuleError(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeploymentError tests the stringized output for the DeploymentError type.
|
||||
func TestDeploymentError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
in DeploymentError
|
||||
want string
|
||||
}{
|
||||
{
|
||||
DeploymentError(0),
|
||||
"deployment ID 0 does not exist",
|
||||
},
|
||||
{
|
||||
DeploymentError(10),
|
||||
"deployment ID 10 does not exist",
|
||||
},
|
||||
{
|
||||
DeploymentError(123),
|
||||
"deployment ID 123 does not exist",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
result := test.in.Error()
|
||||
if result != test.want {
|
||||
t.Errorf("Error #%d\n got: %s want: %s", i, result,
|
||||
test.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssertError(t *testing.T) {
|
||||
message := "abc 123"
|
||||
err := AssertError(message)
|
||||
expectedMessage := fmt.Sprintf("assertion failed: %s", message)
|
||||
if expectedMessage != err.Error() {
|
||||
t.Errorf("Unexpected AssertError message. "+
|
||||
"Got: %s, want: %s", err.Error(), expectedMessage)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,10 +2,12 @@ package blockdag_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -13,10 +15,10 @@ import (
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/mining"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// TestFinality checks that the finality mechanism works as expected.
|
||||
@@ -39,8 +41,8 @@ import (
|
||||
func TestFinality(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.FinalityInterval = 100
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
|
||||
params.FinalityDuration = 100 * params.TargetTimePerBlock
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -48,7 +50,7 @@ func TestFinality(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, ¶ms, parentHashes, nil, false)
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -73,7 +75,7 @@ func TestFinality(t *testing.T) {
|
||||
currentNode := genesis
|
||||
|
||||
// First we build a chain of params.FinalityInterval blocks for future use
|
||||
for i := uint64(0); i < params.FinalityInterval; i++ {
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
@@ -85,7 +87,7 @@ func TestFinality(t *testing.T) {
|
||||
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
|
||||
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
|
||||
currentNode = genesis
|
||||
for i := uint64(0); i < params.FinalityInterval; i++ {
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
@@ -94,7 +96,7 @@ func TestFinality(t *testing.T) {
|
||||
|
||||
expectedFinalityPoint := currentNode
|
||||
|
||||
for i := uint64(0); i < params.FinalityInterval; i++ {
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
@@ -123,7 +125,7 @@ func TestFinality(t *testing.T) {
|
||||
t.Errorf("NextBlockCoinbaseTransaction: %s", err)
|
||||
}
|
||||
merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
|
||||
beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
|
||||
beforeFinalityBlock := domainmessage.NewMsgBlock(&domainmessage.BlockHeader{
|
||||
Version: 0x10000000,
|
||||
ParentHashes: []*daghash.Hash{genesis.Hash()},
|
||||
HashMerkleRoot: merkleRoot,
|
||||
@@ -162,7 +164,7 @@ func TestFinality(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestFinalityInterval tests that the finality interval is
|
||||
// smaller than wire.MaxInvPerMsg, so when a peer receives
// smaller than domainmessage.MaxInvPerMsg, so when a peer receives
|
||||
// a getblocks message it should always be able to send
|
||||
// all the necessary invs.
|
||||
func TestFinalityInterval(t *testing.T) {
|
||||
@@ -174,9 +176,19 @@ func TestFinalityInterval(t *testing.T) {
|
||||
&dagconfig.SimnetParams,
|
||||
}
|
||||
for _, params := range netParams {
|
||||
if params.FinalityInterval > wire.MaxInvPerMsg {
|
||||
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
|
||||
}
|
||||
func() {
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinalityInterval", true, blockdag.Config{
|
||||
DAGParams: params,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup dag instance for %s: %v", params.Name, err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
if dag.FinalityInterval() > domainmessage.MaxInvPerMsg {
|
||||
t.Errorf("FinalityInterval in %s should be lower or equal to domainmessage.MaxInvPerMsg", params.Name)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
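The comment above explains why the finality interval must not exceed domainmessage.MaxInvPerMsg: a getblocks response is limited to that many inventory vectors per message, so a whole finality window has to fit into a single reply. A small illustrative sketch of that constraint; the helper below is hypothetical and not part of the repository:

// batchInvs splits a list of block hashes into chunks that each fit into a
// single inv message. With FinalityInterval <= MaxInvPerMsg the whole
// finality window always fits into one chunk, so a getblocks reply never
// needs to be split. Illustrative sketch only.
func batchInvs(hashes []*daghash.Hash) [][]*daghash.Hash {
	var batches [][]*daghash.Hash
	for len(hashes) > 0 {
		n := len(hashes)
		if n > domainmessage.MaxInvPerMsg {
			n = domainmessage.MaxInvPerMsg
		}
		batches = append(batches, hashes[:n])
		hashes = hashes[n:]
	}
	return batches
}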
@@ -185,7 +197,8 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
params.EnableNonNativeSubnetworks = true
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -198,7 +211,7 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not register network: %s", err)
|
||||
}
|
||||
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
|
||||
limit, err := dag.GasLimit(subnetworkID)
|
||||
if err != nil {
|
||||
t.Fatalf("could not retrieve gas limit: %s", err)
|
||||
}
|
||||
@@ -211,7 +224,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -219,7 +232,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
block1, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -240,38 +253,46 @@ func TestChainedTransactions(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &wire.TxOut{
|
||||
txOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: blockdag.OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
|
||||
tx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut})
|
||||
|
||||
chainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
|
||||
chainedTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *tx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
|
||||
scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build public key script: %s", err)
|
||||
}
|
||||
chainedTxOut := &wire.TxOut{
|
||||
chainedTxOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
|
||||
chainedTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{chainedTxIn}, []*domainmessage.TxOut{chainedTxOut})
|
||||
|
||||
block2, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
|
||||
block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*domainmessage.MsgTx{tx}, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add a chained transaction to block2
|
||||
block2.Transactions = append(block2.Transactions, chainedTx)
|
||||
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
|
||||
for i, tx := range block2.Transactions {
|
||||
block2UtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
|
||||
|
||||
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
|
||||
if err == nil {
|
||||
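The test above relies on the rule that a transaction may not spend an output created by another transaction in the same block. A sketch of how such chained transactions can be detected; the helper name is an assumption, not the repository's actual validation code:

// hasChainedTransactions reports whether any transaction in a block spends an
// output created by another transaction in the same block. Sketch only.
func hasChainedTransactions(txs []*domainmessage.MsgTx) bool {
	idsInBlock := make(map[daghash.TxID]struct{}, len(txs))
	for _, tx := range txs {
		idsInBlock[*tx.TxID()] = struct{}{}
	}
	for _, tx := range txs {
		for _, txIn := range tx.TxIn {
			if _, ok := idsInBlock[txIn.PreviousOutpoint.TxID]; ok {
				return true
			}
		}
	}
	return false
}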
@@ -294,18 +315,18 @@ func TestChainedTransactions(t *testing.T) {
|
||||
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
|
||||
}
|
||||
|
||||
nonChainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
nonChainedTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
nonChainedTxOut := &wire.TxOut{
|
||||
nonChainedTxOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
|
||||
nonChainedTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{nonChainedTxIn}, []*domainmessage.TxOut{nonChainedTxOut})
|
||||
|
||||
block3, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
|
||||
block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*domainmessage.MsgTx{nonChainedTx}, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -331,7 +352,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = math.MaxUint8
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -342,27 +363,27 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
|
||||
createBlock := func(previousBlock *util.Block) *util.Block {
|
||||
// Prepare a transaction that spends the previous block's coinbase transaction
|
||||
var txs []*wire.MsgTx
|
||||
var txs []*domainmessage.MsgTx
|
||||
if !previousBlock.IsGenesis() {
|
||||
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
|
||||
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &wire.TxOut{
|
||||
txOut := &domainmessage.TxOut{
|
||||
ScriptPubKey: blockdag.OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
|
||||
txs = append(txs, domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut}))
|
||||
}
|
||||
|
||||
// Create the block
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{previousBlock.Hash()}, txs, false)
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
|
||||
if err != nil {
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
|
||||
}
|
||||
@@ -401,7 +422,8 @@ func TestGasLimit(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
params.EnableNonNativeSubnetworks = true
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -416,9 +438,9 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("could not register network: %s", err)
|
||||
}
|
||||
|
||||
cbTxs := []*wire.MsgTx{}
|
||||
cbTxs := []*domainmessage.MsgTx{}
|
||||
for i := 0; i < 4; i++ {
|
||||
fundsBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), nil, false)
|
||||
fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -447,30 +469,30 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("Failed to build public key script: %s", err)
|
||||
}
|
||||
|
||||
tx1In := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
tx1In := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[0].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
tx1Out := &wire.TxOut{
|
||||
tx1Out := &domainmessage.TxOut{
|
||||
Value: cbTxs[0].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})
|
||||
tx1 := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{tx1In}, []*domainmessage.TxOut{tx1Out}, subnetworkID, 10000, []byte{})
|
||||
|
||||
tx2In := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
tx2In := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[1].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
tx2Out := &wire.TxOut{
|
||||
tx2Out := &domainmessage.TxOut{
|
||||
Value: cbTxs[1].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
|
||||
tx2 := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{tx2In}, []*domainmessage.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
|
||||
|
||||
// Here we check that we can't process a block that has transactions that exceed the gas limit
|
||||
overLimitBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
|
||||
overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1, tx2}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -492,20 +514,20 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
|
||||
}
|
||||
|
||||
overflowGasTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
overflowGasTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[2].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
overflowGasTxOut := &wire.TxOut{
|
||||
overflowGasTxOut := &domainmessage.TxOut{
|
||||
Value: cbTxs[2].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
|
||||
overflowGasTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{overflowGasTxIn}, []*domainmessage.TxOut{overflowGasTxOut},
|
||||
subnetworkID, math.MaxUint64, []byte{})
|
||||
|
||||
// Here we check that we can't process a block whose transactions' total gas overflows uint64
|
||||
overflowGasBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
|
||||
overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1, overflowGasTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
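The two failure cases exercised above (a block whose subnetwork transactions exceed the gas limit, and one whose summed gas overflows uint64) can be captured in a single accumulation loop. A hedged sketch, assuming MsgTx exposes a Gas field as the constructors in this diff suggest:

// sumGas accumulates the gas of a block's subnetwork transactions, rejecting
// both uint64 overflow and totals that exceed the subnetwork's gas limit.
// Sketch only; not the repository's validation code.
func sumGas(txs []*domainmessage.MsgTx, gasLimit uint64) (uint64, error) {
	var total uint64
	for _, tx := range txs {
		if tx.Gas > math.MaxUint64-total {
			return 0, errors.New("transaction gas overflows uint64")
		}
		total += tx.Gas
		if total > gasLimit {
			return 0, errors.Errorf("total gas %d exceeds limit %d", total, gasLimit)
		}
	}
	return total, nil
}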
@@ -527,19 +549,19 @@ func TestGasLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
|
||||
nonExistentSubnetworkTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
nonExistentSubnetworkTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[3].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
nonExistentSubnetworkTxOut := &wire.TxOut{
|
||||
nonExistentSubnetworkTxOut := &domainmessage.TxOut{
|
||||
Value: cbTxs[3].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
|
||||
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
|
||||
nonExistentSubnetworkTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{nonExistentSubnetworkTxIn},
|
||||
[]*domainmessage.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
|
||||
|
||||
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
|
||||
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -548,7 +570,7 @@ func TestGasLimit(t *testing.T) {
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
|
||||
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
|
||||
nonExistentSubnetwork, nonExistentSubnetwork)
|
||||
if err.Error() != expectedErrStr {
|
||||
if !strings.Contains(err.Error(), expectedErrStr) {
|
||||
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
|
||||
}
|
||||
if isDelayed {
|
||||
@@ -560,7 +582,7 @@ func TestGasLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
|
||||
validBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
|
||||
validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
// newNode is always in the future of blueCandidate, so there's
|
||||
// no point in checking it.
|
||||
if chainBlock != newNode {
|
||||
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil {
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
|
||||
return nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
break
|
||||
@@ -66,7 +66,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
|
||||
for _, block := range chainBlock.blues {
|
||||
// Skip blocks that exist in the past of blueCandidate.
|
||||
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil {
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
|
||||
return nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
continue
|
||||
@@ -78,13 +78,13 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
}
|
||||
candidateAnticoneSize++
|
||||
|
||||
if candidateAnticoneSize > dag.dagParams.K {
|
||||
if candidateAnticoneSize > dag.Params.K {
|
||||
// k-cluster violation: The candidate's blue anticone exceeded k
|
||||
possiblyBlue = false
|
||||
break
|
||||
}
|
||||
|
||||
if candidateBluesAnticoneSizes[block] == dag.dagParams.K {
|
||||
if candidateBluesAnticoneSizes[block] == dag.Params.K {
|
||||
// k-cluster violation: A block in candidate's blue anticone already
|
||||
// has k blue blocks in its own anticone
|
||||
possiblyBlue = false
|
||||
@@ -93,7 +93,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
|
||||
// This is a sanity check that validates that a blue
|
||||
// block's blue anticone is not already larger than K.
|
||||
if candidateBluesAnticoneSizes[block] > dag.dagParams.K {
|
||||
if candidateBluesAnticoneSizes[block] > dag.Params.K {
|
||||
return nil, errors.New("found blue anticone size larger than k")
|
||||
}
|
||||
}
|
||||
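The checks in this hunk implement the GHOSTDAG k-cluster rule: a candidate cannot be blue if its own blue anticone grows past K, or if a block already in that anticone has K blue blocks in its own anticone. A condensed, illustrative restatement of the rule (not the actual ghostdag code):

// violatesKCluster condenses the checks above: the candidate is disqualified
// if its blue anticone exceeds k, or if any block in that anticone already
// has k blue blocks in its own anticone. Illustrative only.
func violatesKCluster(candidateAnticoneSize dagconfig.KType,
	candidateBluesAnticoneSizes map[*blockNode]dagconfig.KType, k dagconfig.KType) bool {

	if candidateAnticoneSize > k {
		return true
	}
	for _, anticoneSize := range candidateBluesAnticoneSizes {
		if anticoneSize >= k {
			return true
		}
	}
	return false
}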
@@ -109,7 +109,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
|
||||
// The maximum length of node.blues can be K+1 because
|
||||
// it contains the selected parent.
|
||||
if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
|
||||
if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -148,7 +148,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
|
||||
if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
|
||||
continue
|
||||
}
|
||||
isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent)
|
||||
isAncestorOfSelectedParent, err := dag.isInPast(parent, node.selectedParent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -2,14 +2,15 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
type testBlockData struct {
|
||||
@@ -33,7 +34,7 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
k: 3,
|
||||
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
|
||||
expectedReds: []string{"F", "G", "H", "I", "N", "P"},
|
||||
dagData: []*testBlockData{
|
||||
{
|
||||
parents: []string{"A"},
|
||||
@@ -166,7 +167,7 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
id: "T",
|
||||
expectedScore: 13,
|
||||
expectedSelectedParent: "S",
|
||||
expectedBlues: []string{"S", "N", "Q"},
|
||||
expectedBlues: []string{"S", "O", "Q"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -176,7 +177,7 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
func() {
|
||||
resetExtraNonceForTest()
|
||||
dagParams.K = test.k
|
||||
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
|
||||
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
|
||||
DAGParams: &dagParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -215,7 +216,10 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
|
||||
}
|
||||
|
||||
node := dag.index.LookupNode(utilBlock.Hash())
|
||||
node, ok := dag.index.LookupNode(utilBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
|
||||
}
|
||||
|
||||
blockByIDMap[blockData.id] = node
|
||||
idByBlockMap[node] = blockData.id
|
||||
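Several tests in this diff repeat the new two-value LookupNode pattern shown above. A small test helper could fold the existence check into one call; this helper is hypothetical and not part of the change:

// mustLookupNode wraps the two-value LookupNode contract for tests: the
// boolean reports whether the hash is known to the block index.
func mustLookupNode(t *testing.T, dag *BlockDAG, hash *daghash.Hash) *blockNode {
	node, ok := dag.index.LookupNode(hash)
	if !ok {
		t.Fatalf("block %s does not exist in the DAG", hash)
	}
	return node
}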
@@ -282,7 +286,7 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
|
||||
|
||||
func TestBlueAnticoneSizeErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -291,22 +295,29 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
|
||||
defer teardownFunc()
|
||||
|
||||
// Prepare a block chain with size K beginning with the genesis block
|
||||
currentBlockA := dag.dagParams.GenesisBlock
|
||||
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
|
||||
newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
|
||||
currentBlockA := dag.Params.GenesisBlock
|
||||
for i := dagconfig.KType(0); i < dag.Params.K; i++ {
|
||||
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
|
||||
currentBlockA = newBlock
|
||||
}
|
||||
|
||||
// Prepare another block chain with size K beginning with the genesis block
|
||||
currentBlockB := dag.dagParams.GenesisBlock
|
||||
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
|
||||
newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
|
||||
currentBlockB := dag.Params.GenesisBlock
|
||||
for i := dagconfig.KType(0); i < dag.Params.K; i++ {
|
||||
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
|
||||
currentBlockB = newBlock
|
||||
}
|
||||
|
||||
// Get references to the tips of the two chains
|
||||
blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
|
||||
blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())
|
||||
blockNodeA, ok := dag.index.LookupNode(currentBlockA.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", currentBlockA.BlockHash())
|
||||
}
|
||||
|
||||
blockNodeB, ok := dag.index.LookupNode(currentBlockB.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", currentBlockB.BlockHash())
|
||||
}
|
||||
|
||||
// Try getting the blueAnticoneSize between them. Since the two
|
||||
// blocks are not in the anticones of each other, this should fail.
|
||||
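The check above depends on the anticone relation between two blocks, i.e. neither being in the other's past. A sketch of that relation expressed through the isInPast helper introduced elsewhere in this diff; the wrapper itself is hypothetical:

// areInAnticone reports whether neither of two blocks is in the other's past,
// i.e. they are in each other's anticone. Sketch only.
func (dag *BlockDAG) areInAnticone(a, b *blockNode) (bool, error) {
	aInPastOfB, err := dag.isInPast(a, b)
	if err != nil {
		return false, err
	}
	bInPastOfA, err := dag.isInPast(b, a)
	if err != nil {
		return false, err
	}
	return !aInPastOfB && !bInPastOfA, nil
}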
@@ -323,7 +334,7 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
|
||||
|
||||
func TestGHOSTDAGErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -332,37 +343,42 @@ func TestGHOSTDAGErrors(t *testing.T) {
|
||||
defer teardownFunc()
|
||||
|
||||
// Add two child blocks to the genesis
|
||||
block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
|
||||
block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
|
||||
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
|
||||
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
|
||||
|
||||
// Add a child block to the previous two blocks
|
||||
block3 := prepareAndProcessBlock(t, dag, block1, block2)
|
||||
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)
|
||||
|
||||
// Clear the reachability store
|
||||
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
|
||||
cursor := bucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := bucket.Delete(cursor.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{}
|
||||
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
|
||||
t.Fatalf("NewTx: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
err = dbaccess.ClearReachabilityData(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearReachabilityData: %s", err)
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Commit: %s", err)
|
||||
}
|
||||
|
||||
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
|
||||
// reachability data, so we expect it to fail.
|
||||
blockNode3 := dag.index.LookupNode(block3.BlockHash())
|
||||
blockNode3, ok := dag.index.LookupNode(block3.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", block3.BlockHash())
|
||||
}
|
||||
_, err = dag.ghostdag(blockNode3)
|
||||
if err == nil {
|
||||
t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
|
||||
}
|
||||
expectedErrSubstring := "Couldn't find reachability data"
|
||||
expectedErrSubstring := "couldn't find reachability data"
|
||||
if !strings.Contains(err.Error(), expectedErrSubstring) {
|
||||
t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
|
||||
"Want: %s, got: %s", expectedErrSubstring, err)
|
||||
|
||||
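The new dbaccess flow used above (NewTx, deferred RollbackUnlessClosed, explicit Commit) recurs throughout this diff. A hypothetical convenience wrapper illustrating the pattern, assuming the dbaccess types shown here:

// withTx opens a transaction, rolls back unless it was closed, and commits
// only if the body succeeds. Sketch of the pattern; not part of the diff.
func withTx(databaseContext *dbaccess.DatabaseContext, fn func(*dbaccess.TxContext) error) error {
	dbTx, err := databaseContext.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	if err := fn(dbTx); err != nil {
		return err
	}
	return dbTx.Commit()
}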
@@ -3,31 +3,20 @@ package indexers
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// acceptanceIndexName is the human-readable name for the index.
|
||||
acceptanceIndexName = "acceptance index"
|
||||
)
|
||||
|
||||
var (
|
||||
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
|
||||
// to house it.
|
||||
acceptanceIndexKey = []byte("acceptanceidx")
|
||||
)
|
||||
|
||||
// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
|
||||
// it stores a mapping between a block's hash and the set of transactions that the
|
||||
// block accepts among its blue blocks.
|
||||
type AcceptanceIndex struct {
|
||||
db database.DB
|
||||
dag *blockdag.BlockDAG
|
||||
dag *blockdag.BlockDAG
|
||||
databaseContext *dbaccess.DatabaseContext
|
||||
}
|
||||
|
||||
// Ensure the AcceptanceIndex type implements the Indexer interface.
|
||||
@@ -43,127 +32,88 @@ func NewAcceptanceIndex() *AcceptanceIndex {
|
||||
return &AcceptanceIndex{}
|
||||
}
|
||||
|
||||
// DropAcceptanceIndex drops the acceptance index from the provided database if it
|
||||
// exists.
|
||||
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
|
||||
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
|
||||
}
|
||||
// DropAcceptanceIndex drops the acceptance index.
|
||||
func DropAcceptanceIndex(databaseContext *dbaccess.DatabaseContext) error {
|
||||
dbTx, err := databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
// Key returns the database key to use for the index as a byte slice.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Key() []byte {
|
||||
return acceptanceIndexKey
|
||||
}
|
||||
err = dbaccess.DropAcceptanceIndex(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Name returns the human-readable name of the index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Name() string {
|
||||
return acceptanceIndexName
|
||||
}
|
||||
|
||||
// Create is invoked when the indexer manager determines the index needs
|
||||
// to be created for the first time. It creates the bucket for the
|
||||
// acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
|
||||
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
|
||||
return err
|
||||
return dbTx.Commit()
|
||||
}
|
||||
|
||||
// Init initializes the hash-based acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
|
||||
idx.db = db
|
||||
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
|
||||
idx.dag = dag
|
||||
return nil
|
||||
idx.databaseContext = databaseContext
|
||||
return idx.recover()
|
||||
}
|
||||
|
||||
// recover attempts to insert any data that's missing from the
|
||||
// acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) recover() error {
|
||||
return idx.dag.ForEachHash(func(hash daghash.Hash) error {
|
||||
dbTx, err := idx.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Commit()
|
||||
})
|
||||
}
|
||||
|
||||
// ConnectBlock is invoked by the index manager when a new block has been
|
||||
// connected to the DAG.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
|
||||
}
|
||||
|
||||
// TxsAcceptanceData returns the acceptance data of all the transactions that
|
||||
// were accepted by the block with hash blockHash.
|
||||
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
|
||||
err := idx.db.View(func(dbTx database.Tx) error {
|
||||
var err error
|
||||
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return txsAcceptanceData, nil
|
||||
}
|
||||
|
||||
// Recover is invoked when the indexer wasn't turned on for several blocks
|
||||
// and the indexer needs to close the gaps.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
|
||||
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
|
||||
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
|
||||
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
|
||||
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
|
||||
return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
|
||||
}
|
||||
|
||||
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
|
||||
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
|
||||
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
|
||||
// TxsAcceptanceData returns the acceptance data of all the transactions that
|
||||
// were accepted by the block with hash blockHash.
|
||||
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(idx.databaseContext, blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
|
||||
}
|
||||
|
||||
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
|
||||
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
serializedBlockID := blockdag.SerializeBlockID(blockID)
|
||||
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
|
||||
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
|
||||
if serializedTxsAcceptanceData == nil {
|
||||
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
|
||||
}
|
||||
|
||||
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
|
||||
}
|
||||
|
||||
type serializableTxAcceptanceData struct {
|
||||
MsgTx wire.MsgTx
|
||||
MsgTx domainmessage.MsgTx
|
||||
IsAccepted bool
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -15,24 +8,32 @@ import (
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
|
||||
// Create test data
|
||||
hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
|
||||
txIn2 := &wire.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: wire.Outpoint{Index: 2}, Sequence: 0}
|
||||
txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
|
||||
txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
|
||||
txIn1 := &domainmessage.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: domainmessage.Outpoint{Index: 1}, Sequence: 0}
|
||||
txIn2 := &domainmessage.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: domainmessage.Outpoint{Index: 2}, Sequence: 0}
|
||||
txOut1 := &domainmessage.TxOut{ScriptPubKey: []byte{1}, Value: 10}
|
||||
txOut2 := &domainmessage.TxOut{ScriptPubKey: []byte{2}, Value: 20}
|
||||
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
|
||||
BlockHash: *hash,
|
||||
TxAcceptanceData: []blockdag.TxAcceptanceData{
|
||||
{
|
||||
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
|
||||
Tx: util.NewTx(domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn1}, []*domainmessage.TxOut{txOut1})),
|
||||
IsAccepted: true,
|
||||
},
|
||||
{
|
||||
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
|
||||
Tx: util.NewTx(domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn2}, []*domainmessage.TxOut{txOut2})),
|
||||
IsAccepted: false,
|
||||
},
|
||||
},
|
||||
@@ -96,18 +97,18 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(db1Path)
|
||||
|
||||
db1, err := database.Create("ffldb", db1Path, params.Net)
|
||||
databaseContext1, err := dbaccess.New(db1Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
db1Config := blockdag.Config{
|
||||
IndexManager: db1IndexManager,
|
||||
DAGParams: params,
|
||||
DB: db1,
|
||||
IndexManager: db1IndexManager,
|
||||
DAGParams: params,
|
||||
DatabaseContext: databaseContext1,
|
||||
}
|
||||
|
||||
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
|
||||
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
@@ -130,11 +131,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err = db1.FlushCache()
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing database to disk: %s", err)
|
||||
}
|
||||
|
||||
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temporary directory: %s", err)
|
||||
@@ -166,17 +162,21 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
t.Fatalf("Error fetching acceptance data: %s", err)
|
||||
}
|
||||
|
||||
db2, err := database.Open("ffldb", db2Path, params.Net)
|
||||
err = databaseContext1.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error opening database: %s", err)
|
||||
t.Fatalf("Error closing the database: %s", err)
|
||||
}
|
||||
databaseContext2, err := dbaccess.New(db2Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
db2Config := blockdag.Config{
|
||||
DAGParams: params,
|
||||
DB: db2,
|
||||
DAGParams: params,
|
||||
DatabaseContext: databaseContext2,
|
||||
}
|
||||
|
||||
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
|
||||
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
@@ -199,10 +199,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err = db2.FlushCache()
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing database to disk: %s", err)
|
||||
}
|
||||
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temporary directory: %s", err)
|
||||
@@ -213,20 +209,24 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
t.Fatalf("copyDirectory: %s", err)
|
||||
}
|
||||
|
||||
db3, err := database.Open("ffldb", db3Path, params.Net)
|
||||
err = databaseContext2.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error opening database: %s", err)
|
||||
t.Fatalf("Error closing the database: %s", err)
|
||||
}
|
||||
databaseContext3, err := dbaccess.New(db3Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
db3AcceptanceIndex := NewAcceptanceIndex()
|
||||
db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
|
||||
db3Config := blockdag.Config{
|
||||
IndexManager: db3IndexManager,
|
||||
DAGParams: params,
|
||||
DB: db3,
|
||||
IndexManager: db3IndexManager,
|
||||
DAGParams: params,
|
||||
DatabaseContext: databaseContext3,
|
||||
}
|
||||
|
||||
_, teardown, err = blockdag.DAGSetup("", db3Config)
|
||||
_, teardown, err = blockdag.DAGSetup("", false, db3Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
// Copyright (c) 2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package indexers implements optional block DAG indexes.
|
||||
*/
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// byteOrder is the preferred byte order used for serializing numeric
|
||||
// fields for storage in the database.
|
||||
byteOrder = binary.LittleEndian
|
||||
|
||||
// errInterruptRequested indicates that an operation was cancelled due
|
||||
// to a user-requested interrupt.
|
||||
errInterruptRequested = errors.New("interrupt requested")
|
||||
)
|
||||
|
||||
// NeedsInputser provides a generic interface for an indexer to specify that it
|
||||
// requires the ability to look up inputs for a transaction.
|
||||
type NeedsInputser interface {
|
||||
NeedsInputs() bool
|
||||
}
|
||||
|
||||
// Indexer provides a generic interface for an indexer that is managed by an
|
||||
// index manager such as the Manager type provided by this package.
|
||||
type Indexer interface {
|
||||
// Key returns the key of the index as a byte slice.
|
||||
Key() []byte
|
||||
|
||||
// Name returns the human-readable name of the index.
|
||||
Name() string
|
||||
|
||||
// Create is invoked when the indexer manager determines the index needs
|
||||
// to be created for the first time.
|
||||
Create(dbTx database.Tx) error
|
||||
|
||||
// Init is invoked when the index manager is first initializing the
|
||||
// index. This differs from the Create method in that it is called on
|
||||
// every load, including the case the index was just created.
|
||||
Init(db database.DB, dag *blockdag.BlockDAG) error
|
||||
|
||||
// ConnectBlock is invoked when the index manager is notified that a new
|
||||
// block has been connected to the DAG.
|
||||
ConnectBlock(dbTx database.Tx,
|
||||
block *util.Block,
|
||||
blockID uint64,
|
||||
dag *blockdag.BlockDAG,
|
||||
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
|
||||
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
|
||||
|
||||
// Recover is invoked when the indexer wasn't turned on for several blocks
|
||||
// and the indexer needs to close the gaps.
|
||||
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
|
||||
}
|
||||
|
||||
// AssertError identifies an error that indicates an internal code consistency
|
||||
// issue and should be treated as a critical and unrecoverable error.
|
||||
type AssertError string
|
||||
|
||||
// Error returns the assertion error as a human-readable string and satisfies
|
||||
// the error interface.
|
||||
func (e AssertError) Error() string {
|
||||
return "assertion failed: " + string(e)
|
||||
}
|
||||
|
||||
// errDeserialize signifies that a problem was encountered when deserializing
|
||||
// data.
|
||||
type errDeserialize string
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e errDeserialize) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// isDeserializeErr returns whether or not the passed error is an errDeserialize
|
||||
// error.
|
||||
func isDeserializeErr(err error) bool {
|
||||
var deserializeErr errDeserialize
|
||||
return errors.As(err, &deserializeErr)
|
||||
}
|
||||
|
||||
// internalBucket is an abstraction over a database bucket. It is used to make
|
||||
// the code easier to test since it allows mock objects in the tests to only
|
||||
// implement these functions instead of everything a database.Bucket supports.
|
||||
type internalBucket interface {
|
||||
Get(key []byte) []byte
|
||||
Put(key []byte, value []byte) error
|
||||
Delete(key []byte) error
|
||||
}
|
||||
|
||||
// interruptRequested returns true when the provided channel has been closed.
|
||||
// This simplifies early shutdown slightly since the caller can just use an if
|
||||
// statement instead of a select.
|
||||
func interruptRequested(interrupted <-chan struct{}) bool {
|
||||
select {
|
||||
case <-interrupted:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
28
blockdag/indexers/indexer.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package indexers implements optional block DAG indexes.
|
||||
*/
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// Indexer provides a generic interface for an indexer that is managed by an
|
||||
// index manager such as the Manager type provided by this package.
|
||||
type Indexer interface {
|
||||
// Init is invoked when the index manager is first initializing the
|
||||
// index.
|
||||
Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error
|
||||
|
||||
// ConnectBlock is invoked when the index manager is notified that a new
|
||||
// block has been connected to the DAG.
|
||||
ConnectBlock(dbContext *dbaccess.TxContext,
|
||||
blockHash *daghash.Hash,
|
||||
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
|
||||
}
|
||||
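The new file reduces Indexer to two methods. A minimal, do-nothing implementation showing the shape the interface now requires; illustrative only, since real indexers such as AcceptanceIndex do actual work:

// nopIndexer implements the slimmed-down Indexer interface without recording
// anything. It exists only to show the required method set.
type nopIndexer struct{}

func (idx *nopIndexer) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
	return nil
}

func (idx *nopIndexer) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil
}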
@@ -6,190 +6,30 @@ package indexers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
var (
|
||||
// indexTipsBucketName is the name of the db bucket used to house the
|
||||
// current tip of each index.
|
||||
indexTipsBucketName = []byte("idxtips")
|
||||
|
||||
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
|
||||
)
|
||||
|
||||
// Manager defines an index manager that manages multiple optional indexes and
|
||||
// implements the blockdag.IndexManager interface so it can be seamlessly
|
||||
// plugged into normal DAG processing.
|
||||
type Manager struct {
|
||||
db database.DB
|
||||
enabledIndexes []Indexer
|
||||
}
|
||||
|
||||
// Ensure the Manager type implements the blockdag.IndexManager interface.
|
||||
var _ blockdag.IndexManager = (*Manager)(nil)
|
||||
|
||||
// indexDropKey returns the key for an index which indicates it is in the
|
||||
// process of being dropped.
|
||||
func indexDropKey(idxKey []byte) []byte {
|
||||
dropKey := make([]byte, len(idxKey)+1)
|
||||
dropKey[0] = 'd'
|
||||
copy(dropKey[1:], idxKey)
|
||||
return dropKey
|
||||
}
|
||||
|
||||
// maybeFinishDrops determines if each of the enabled indexes is in the middle
|
||||
// of being dropped and finishes dropping them when they are. This is necessary
|
||||
// because dropping an index has to be done in several atomic steps rather than
|
||||
// one big atomic step due to the massive number of entries.
|
||||
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
|
||||
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
|
||||
err := m.db.View(func(dbTx database.Tx) error {
|
||||
// None of the indexes needs to be dropped if the index tips
|
||||
// bucket hasn't been created yet.
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
if indexesBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark the indexer as requiring a drop if one is already in
|
||||
// progress.
|
||||
for i, indexer := range m.enabledIndexes {
|
||||
dropKey := indexDropKey(indexer.Key())
|
||||
if indexesBucket.Get(dropKey) != nil {
|
||||
indexNeedsDrop[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
// Finish dropping any of the enabled indexes that are already in the
|
||||
// middle of being dropped.
|
||||
for i, indexer := range m.enabledIndexes {
|
||||
if !indexNeedsDrop[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("Resuming %s drop", indexer.Name())
|
||||
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// maybeCreateIndexes determines if each of the enabled indexes has already
|
||||
// been created and creates them if not.
|
||||
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
// Nothing to do if the index tip already exists.
|
||||
idxKey := indexer.Key()
|
||||
if indexesBucket.Get(idxKey) != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// The tip for the index does not exist, so create it and
|
||||
// invoke the create callback for the index so it can perform
|
||||
// any one-time initialization it requires.
|
||||
if err := indexer.Create(dbTx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO (Mike): this is a temporary solution to keep the node from refusing to start
|
||||
// because it thinks indexers are not initialized.
|
||||
// Indexers, however, do not work properly, and a general solution for how they operate is required
|
||||
indexesBucket.Put(idxKey, []byte{0})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init initializes the enabled indexes. This is called during DAG
|
||||
// initialization and primarily consists of catching up all indexes to the
|
||||
// current tips. This is necessary since each index can be disabled
|
||||
// and re-enabled at any time and attempting to catch-up indexes at the same
|
||||
// time new blocks are being downloaded would lead to an overall longer time to
|
||||
// catch up due to the I/O contention.
|
||||
//
|
||||
// Init initializes the enabled indexes.
|
||||
// This is part of the blockdag.IndexManager interface.
|
||||
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
|
||||
// Nothing to do when no indexes are enabled.
|
||||
if len(m.enabledIndexes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
m.db = db
|
||||
|
||||
// Finish any drops that were previously interrupted.
|
||||
if err := m.maybeFinishDrops(interrupt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the initial state for the indexes as needed.
|
||||
err := m.db.Update(func(dbTx database.Tx) error {
|
||||
// Create the bucket for the current tips as needed.
|
||||
meta := dbTx.Metadata()
|
||||
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.maybeCreateIndexes(dbTx)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize each of the enabled indexes.
|
||||
func (m *Manager) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
if err := indexer.Init(db, blockDAG); err != nil {
|
||||
if err := indexer.Init(dag, databaseContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return m.recoverIfNeeded()
|
||||
}
|
||||
|
||||
// recoverIfNeeded checks whether the node ran for some time
|
||||
// without one of the currently enabled indexes, and if so,
|
||||
// recovers the missing blocks into the index.
|
||||
func (m *Manager) recoverIfNeeded() error {
|
||||
return m.db.Update(func(dbTx database.Tx) error {
|
||||
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
|
||||
currentIdxBlockID := uint64(0)
|
||||
if serializedCurrentIdxBlockID != nil {
|
||||
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
|
||||
}
|
||||
if lastKnownBlockID > currentIdxBlockID {
|
||||
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectBlock must be invoked when a block is added to the DAG. It
|
||||
@@ -197,32 +37,13 @@ func (m *Manager) recoverIfNeeded() error {
|
||||
// checks, and invokes each indexer.
|
||||
//
|
||||
// This is part of the blockdag.IndexManager interface.
|
||||
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
|
||||
// Call each of the currently active optional indexes with the block
|
||||
// being connected so they can update accordingly.
|
||||
for _, index := range m.enabledIndexes {
|
||||
// Notify the indexer with the connected block so it can index it.
|
||||
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the new block ID index entry for the block being connected and
|
||||
// update the current internal block ID accordingly.
|
||||
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
|
||||
serializedBlockID := blockdag.SerializeBlockID(blockID)
|
||||
for _, index := range m.enabledIndexes {
|
||||
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
|
||||
if err != nil {
|
||||
if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
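With the slimmer signature above, the DAG side only has to open a transaction, call Manager.ConnectBlock with the block hash and its acceptance data, and commit. A sketch of such a call site, under the assumption that the caller owns the TxContext; this function is illustrative and not taken from the diff:

// notifyIndexers commits only after every enabled indexer has accepted the
// connected block. Sketch of a possible call site.
func notifyIndexers(manager *Manager, databaseContext *dbaccess.DatabaseContext,
	blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

	dbTx, err := databaseContext.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	if err := manager.ConnectBlock(dbTx, blockHash, txsAcceptanceData); err != nil {
		return err
	}
	return dbTx.Commit()
}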
@@ -238,155 +59,3 @@ func NewManager(enabledIndexes []Indexer) *Manager {
|
||||
enabledIndexes: enabledIndexes,
|
||||
}
|
||||
}
|
||||
|
||||
// dropIndex drops the passed index from the database. Since indexes can be
|
||||
// massive, it deletes the index in multiple database transactions in order to
|
||||
// keep memory usage to reasonable levels. It also marks the drop as in progress
|
||||
// so that, if it is stopped before it is done, the drop is resumed before the
|
||||
// index can be used again.
|
||||
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
|
||||
// Nothing to do if the index doesn't already exist.
|
||||
var needsDelete bool
|
||||
err := db.View(func(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
|
||||
needsDelete = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !needsDelete {
|
||||
log.Infof("Not dropping %s because it does not exist", idxName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark that the index is in the process of being dropped so that it
|
||||
// can be resumed on the next start if interrupted before the process is
|
||||
// complete.
|
||||
log.Infof("Dropping all %s entries. This might take a while...",
|
||||
idxName)
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since the indexes can be so large, attempting to simply delete
|
||||
// the bucket in a single database transaction would result in massive
|
||||
// memory usage and likely crash many systems due to ulimits. In order
|
||||
// to avoid this, use a cursor to delete a maximum number of entries out
|
||||
// of the bucket at a time. Recurse buckets depth-first to delete any
|
||||
// sub-buckets.
|
||||
const maxDeletions = 2000000
|
||||
var totalDeleted uint64
|
||||
|
||||
// Recurse through all buckets in the index, cataloging each for
|
||||
// later deletion.
|
||||
var subBuckets [][][]byte
|
||||
var subBucketClosure func(database.Tx, []byte, [][]byte) error
|
||||
subBucketClosure = func(dbTx database.Tx,
|
||||
subBucket []byte, tlBucket [][]byte) error {
|
||||
// Get full bucket name and append to subBuckets for later
|
||||
// deletion.
|
||||
var bucketName [][]byte
|
||||
if (tlBucket == nil) || (len(tlBucket) == 0) {
|
||||
bucketName = append(bucketName, subBucket)
|
||||
} else {
|
||||
bucketName = append(tlBucket, subBucket)
|
||||
}
|
||||
subBuckets = append(subBuckets, bucketName)
|
||||
// Recurse sub-buckets to append to subBuckets slice.
|
||||
bucket := dbTx.Metadata()
|
||||
for _, subBucketName := range bucketName {
|
||||
bucket = bucket.Bucket(subBucketName)
|
||||
}
|
||||
return bucket.ForEachBucket(func(k []byte) error {
|
||||
return subBucketClosure(dbTx, k, bucketName)
|
||||
})
|
||||
}
|
||||
|
||||
// Call subBucketClosure with top-level bucket.
|
||||
err = db.View(func(dbTx database.Tx) error {
|
||||
return subBucketClosure(dbTx, idxKey, nil)
|
||||
})
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Iterate through each sub-bucket in reverse, deepest-first, deleting
|
||||
// all keys inside them and then dropping the buckets themselves.
|
||||
for i := range subBuckets {
|
||||
bucketName := subBuckets[len(subBuckets)-1-i]
|
||||
// Delete maxDeletions key/value pairs at a time.
|
||||
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
|
||||
numDeleted = 0
|
||||
err := db.Update(func(dbTx database.Tx) error {
|
||||
subBucket := dbTx.Metadata()
|
||||
for _, subBucketName := range bucketName {
|
||||
subBucket = subBucket.Bucket(subBucketName)
|
||||
}
|
||||
cursor := subBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() &&
|
||||
numDeleted < maxDeletions {
|
||||
|
||||
if err := cursor.Delete(); err != nil {
|
||||
return err
|
||||
}
|
||||
numDeleted++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if numDeleted > 0 {
|
||||
totalDeleted += uint64(numDeleted)
|
||||
log.Infof("Deleted %d keys (%d total) from %s",
|
||||
numDeleted, totalDeleted, idxName)
|
||||
}
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
// Drop the bucket itself.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata()
|
||||
for j := 0; j < len(bucketName)-1; j++ {
|
||||
bucket = bucket.Bucket(bucketName[j])
|
||||
}
|
||||
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the index tip, index bucket, and in-progress drop flag now
|
||||
// that all index entries have been removed.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
meta := dbTx.Metadata()
|
||||
indexesBucket := meta.Bucket(indexTipsBucketName)
|
||||
if err := indexesBucket.Delete(idxKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return indexesBucket.Delete(indexDropKey(idxKey))
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Dropped %s", idxName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
"math"
"sort"
"sync"
"time"
)

const (
// maxAllowedOffsetSeconds is the maximum number of seconds in either
// direction that local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes

// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likley wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)

var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)

// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time

// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)

// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeData.
Offset() time.Duration
}
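To make the contract above concrete, here is a minimal usage sketch. It is illustrative only and not part of the diff: it assumes the pre-change blockdag package is importable at github.com/kaspanet/kaspad/blockdag, and the peer IDs and offsets are made up.

package main

import (
	"fmt"
	"time"

	"github.com/kaspanet/kaspad/blockdag"
)

func main() {
	timeSource := blockdag.NewMedianTime()
	// Each peer contributes one sample, keyed by a unique ID so repeated
	// submissions from the same source are ignored.
	for i, offset := range []time.Duration{3, -2, 0, 5, -4} {
		timeSource.AddTimeSample(fmt.Sprintf("peer-%d", i), time.Now().Add(offset*time.Second))
	}
	// The offset stays zero until at least five samples (an odd count) exist
	// and the median is within maxAllowedOffsetSecs of the local clock.
	fmt.Println("offset:", timeSource.Offset())
	fmt.Println("adjusted:", timeSource.AdjustedTime())
}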

// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64

// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}

// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}

// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}

// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)

// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()

// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}

// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()

// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}

// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++

// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))

offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)

// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}

// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]

// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0

if !m.invalidTimeChecked {
m.invalidTimeChecked = true

// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}

// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! kaspad will not work " +
"properly with an invalid time")
}
}
}

medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}

// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeData.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()

return time.Duration(m.offsetSecs) * time.Second
}

// NewMedianTime returns a new instance of concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
@@ -1,102 +0,0 @@
|
||||
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestMedianTime tests the medianTime implementation.
|
||||
func TestMedianTime(t *testing.T) {
|
||||
tests := []struct {
|
||||
in []int64
|
||||
wantOffset int64
|
||||
useDupID bool
|
||||
}{
|
||||
// Not enough samples must result in an offset of 0.
|
||||
{in: []int64{1}, wantOffset: 0},
|
||||
{in: []int64{1, 2}, wantOffset: 0},
|
||||
{in: []int64{1, 2, 3}, wantOffset: 0},
|
||||
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
|
||||
|
||||
// Various number of entries. The expected offset is only
|
||||
// updated on odd number of elements.
|
||||
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
|
||||
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
|
||||
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
|
||||
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
|
||||
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
|
||||
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
|
||||
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
|
||||
|
||||
// The offset stops being updated once the max number of entries
|
||||
// has been reached.
|
||||
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
|
||||
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
|
||||
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
|
||||
|
||||
// Offsets that are too far away from the local time should
|
||||
// be ignored.
|
||||
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
|
||||
|
||||
// Exercise the condition where the median offset is greater
|
||||
// than the max allowed adjustment, but there is at least one
|
||||
// sample that is close enough to the current time to avoid
|
||||
// triggering a warning about an invalid local clock.
|
||||
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
|
||||
}
|
||||
|
||||
// Modify the max number of allowed median time entries for these tests.
|
||||
maxMedianTimeEntries = 10
|
||||
defer func() { maxMedianTimeEntries = 200 }()
|
||||
|
||||
for i, test := range tests {
|
||||
filter := NewMedianTime()
|
||||
for j, offset := range test.in {
|
||||
id := strconv.Itoa(j)
|
||||
now := time.Unix(time.Now().Unix(), 0)
|
||||
tOffset := now.Add(time.Duration(offset) * time.Second)
|
||||
filter.AddTimeSample(id, tOffset)
|
||||
|
||||
// Ensure the duplicate IDs are ignored.
|
||||
if test.useDupID {
|
||||
// Modify the offsets to ensure the final median
|
||||
// would be different if the duplicate is added.
|
||||
tOffset = tOffset.Add(time.Duration(offset) *
|
||||
time.Second)
|
||||
filter.AddTimeSample(id, tOffset)
|
||||
}
|
||||
}
|
||||
|
||||
// Since it is possible that the time.Now call in AddTimeSample
|
||||
// and the time.Now calls here in the tests will be off by one
|
||||
// second, allow a fudge factor to compensate.
|
||||
gotOffset := filter.Offset()
|
||||
wantOffset := time.Duration(test.wantOffset) * time.Second
|
||||
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
|
||||
if gotOffset != wantOffset && gotOffset != wantOffset2 {
|
||||
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
|
||||
"want %v or %v", i, gotOffset, wantOffset,
|
||||
wantOffset2)
|
||||
continue
|
||||
}
|
||||
|
||||
// Since it is possible that the time.Now call in AdjustedTime
|
||||
// and the time.Now call here in the tests will be off by one
|
||||
// second, allow a fudge factor to compensate.
|
||||
adjustedTime := filter.AdjustedTime()
|
||||
now := time.Unix(time.Now().Unix(), 0)
|
||||
wantTime := now.Add(filter.Offset())
|
||||
wantTime2 := now.Add(filter.Offset() - time.Second)
|
||||
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
|
||||
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
|
||||
"want %v or %v", i, adjustedTime, wantTime,
|
||||
wantTime2)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,16 +3,22 @@ package blockdag
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"time"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
)
|
||||
|
||||
// BlockForMining returns a block with the given transactions
|
||||
// that points to the current DAG tips, that is valid from
|
||||
// all aspects except proof of work.
|
||||
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*domainmessage.MsgBlock, error) {
|
||||
blockTimestamp := dag.NextBlockTime()
|
||||
requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)
|
||||
|
||||
@@ -29,23 +35,22 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var msgBlock wire.MsgBlock
|
||||
var msgBlock domainmessage.MsgBlock
|
||||
for _, tx := range transactions {
|
||||
msgBlock.AddTransaction(tx.MsgTx())
|
||||
}
|
||||
|
||||
utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
|
||||
multiset, err := dag.NextBlockMultiset()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoCommitment := utxoWithTransactions.Multiset().Hash()
|
||||
|
||||
msgBlock.Header = wire.BlockHeader{
|
||||
msgBlock.Header = domainmessage.BlockHeader{
|
||||
Version: nextBlockVersion,
|
||||
ParentHashes: dag.TipHashes(),
|
||||
HashMerkleRoot: hashMerkleTree.Root(),
|
||||
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
|
||||
UTXOCommitment: utxoCommitment,
|
||||
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
|
||||
Timestamp: blockTimestamp,
|
||||
Bits: requiredDifficulty,
|
||||
}
|
||||
@@ -53,6 +58,19 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
|
||||
return &msgBlock, nil
|
||||
}
|
||||
|
||||
// NextBlockMultiset returns the multiset of an assumed next block
|
||||
// built on top of the current tips.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
|
||||
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
|
||||
}
|
||||
|
||||
// CoinbasePayloadExtraData returns coinbase payload extra data parameter
|
||||
// which is built from extra nonce and coinbase flags.
|
||||
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
|
||||
@@ -88,20 +106,20 @@ func (dag *BlockDAG) NextCoinbaseFromAddress(payToAddress util.Address, extraDat
|
||||
// on the end of the DAG. In particular, it is one second after
|
||||
// the median timestamp of the last several blocks per the DAG consensus
|
||||
// rules.
|
||||
func (dag *BlockDAG) NextBlockMinimumTime() time.Time {
|
||||
return dag.CalcPastMedianTime().Add(time.Second)
|
||||
func (dag *BlockDAG) NextBlockMinimumTime() mstime.Time {
|
||||
return dag.CalcPastMedianTime().Add(time.Millisecond)
|
||||
}
|
||||
|
||||
// NextBlockTime returns a valid block time for the
|
||||
// next block that will point to the existing DAG tips.
|
||||
func (dag *BlockDAG) NextBlockTime() time.Time {
|
||||
func (dag *BlockDAG) NextBlockTime() mstime.Time {
|
||||
// The timestamp for the block must not be before the median timestamp
|
||||
// of the last several blocks. Thus, choose the maximum between the
|
||||
// current time and one second after the past median time. The current
|
||||
// timestamp is truncated to a second boundary before comparison since a
|
||||
// timestamp is truncated to a millisecond boundary before comparison since a
|
||||
// block timestamp does not supported a precision greater than one
|
||||
// second.
|
||||
newTimestamp := dag.AdjustedTime()
|
||||
// millisecond.
|
||||
newTimestamp := dag.Now()
|
||||
minTimestamp := dag.NextBlockMinimumTime()
|
||||
if newTimestamp.Before(minTimestamp) {
|
||||
newTimestamp = minTimestamp
|
||||
|
||||
blockdag/multisetio.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package blockdag

import (
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"io"
)

const multisetPointSize = 32

// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
serialized := ms.Serialize()
err := binary.Write(w, byteOrder, serialized)
if err != nil {
return err
}
return nil
}

// deserializeMultiset deserializes an EMCH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
serialized := &secp256k1.SerializedMultiSet{}
err := binary.Read(r, byteOrder, serialized[:])
if err != nil {
return nil, err
}
return secp256k1.DeserializeMultiSet(serialized)
}
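A small sketch of how these two helpers pair up, for illustration only; it assumes it lives inside the blockdag package (both functions and byteOrder are unexported), that the bytes package is imported, and that ms was obtained elsewhere, for example from dag.NextBlockMultiset() shown earlier in this diff.

// multisetRoundTrip encodes a multiset with the database serialization above
// and immediately decodes it again, returning the reconstructed copy.
func multisetRoundTrip(ms *secp256k1.MultiSet) (*secp256k1.MultiSet, error) {
	var buf bytes.Buffer
	if err := serializeMultiset(&buf, ms); err != nil {
		return nil, err
	}
	return deserializeMultiset(&buf)
}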
blockdag/multisetstore.go (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/locks"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type multisetStore struct {
|
||||
dag *BlockDAG
|
||||
new map[daghash.Hash]struct{}
|
||||
loaded map[daghash.Hash]secp256k1.MultiSet
|
||||
mtx *locks.PriorityMutex
|
||||
}
|
||||
|
||||
func newMultisetStore(dag *BlockDAG) *multisetStore {
|
||||
return &multisetStore{
|
||||
dag: dag,
|
||||
new: make(map[daghash.Hash]struct{}),
|
||||
loaded: make(map[daghash.Hash]secp256k1.MultiSet),
|
||||
}
|
||||
}
|
||||
|
||||
func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
|
||||
store.loaded[*node.hash] = *ms
|
||||
store.addToNewBlocks(node.hash)
|
||||
}
|
||||
|
||||
func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
|
||||
store.new[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func multisetNotFoundError(blockHash *daghash.Hash) error {
|
||||
return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
|
||||
}
|
||||
|
||||
func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
|
||||
ms, exists := store.multisetByBlockHash(node.hash)
|
||||
if !exists {
|
||||
return nil, multisetNotFoundError(node.hash)
|
||||
}
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
|
||||
ms, ok := store.loaded[*hash]
|
||||
return &ms, ok
|
||||
}
|
||||
|
||||
// flushToDB writes all new multiset data to the database.
|
||||
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
if len(store.new) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
w := &bytes.Buffer{}
|
||||
for hash := range store.new {
|
||||
hash := hash // Copy hash to a new variable to avoid passing the same pointer
|
||||
|
||||
w.Reset()
|
||||
ms, exists := store.loaded[hash]
|
||||
if !exists {
|
||||
return multisetNotFoundError(&hash)
|
||||
}
|
||||
|
||||
err := serializeMultiset(w, &ms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = store.storeMultiset(dbContext, &hash, w.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *multisetStore) clearNewEntries() {
|
||||
store.new = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
func (store *multisetStore) init(dbContext dbaccess.Context) error {
|
||||
cursor, err := dbaccess.MultisetCursor(dbContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := daghash.NewHash(key.Suffix())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serializedMS, err := cursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store.loaded[*hash] = *ms
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// storeMultiset stores the multiset data to the database.
|
||||
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
|
||||
exists, err := dbaccess.HasMultiset(dbContext, blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
|
||||
}
|
||||
|
||||
return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
|
||||
}
|
||||
@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("notifications", Config{
|
||||
dag, teardownFunc, err := DAGSetup("notifications", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -6,9 +6,10 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -49,6 +50,10 @@ const (
|
||||
// This is used for the case where a block is submitted through RPC.
|
||||
BFDisallowDelay
|
||||
|
||||
// BFDisallowOrphans is set to indicate that an orphan block should be rejected.
|
||||
// This is used for the case where a block is submitted through RPC.
|
||||
BFDisallowOrphans
|
||||
|
||||
// BFNone is a convenience value to specifically indicate no flags.
|
||||
BFNone BehaviorFlags = 0
|
||||
)
|
||||
@@ -151,6 +156,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
|
||||
isAfterDelay := flags&BFAfterDelay == BFAfterDelay
|
||||
wasBlockStored := flags&BFWasStored == BFWasStored
|
||||
disallowDelay := flags&BFDisallowDelay == BFDisallowDelay
|
||||
disallowOrphans := flags&BFDisallowOrphans == BFDisallowOrphans
|
||||
|
||||
blockHash := block.Hash()
|
||||
log.Tracef("Processing block %s", blockHash)
|
||||
@@ -199,12 +205,16 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
|
||||
missingParents = append(missingParents, parentHash)
|
||||
}
|
||||
}
|
||||
if len(missingParents) > 0 && disallowOrphans {
|
||||
str := fmt.Sprintf("Cannot process orphan blocks while the BFDisallowOrphans flag is raised %s", blockHash)
|
||||
return false, false, ruleError(ErrOrphanBlockIsNotAllowed, str)
|
||||
}
|
||||
|
||||
// Handle the case of a block with a valid timestamp(non-delayed) which points to a delayed block.
|
||||
delay, isParentDelayed := dag.maxDelayOfParents(missingParents)
|
||||
if isParentDelayed {
|
||||
// Add Nanosecond to ensure that parent process time will be after its child.
|
||||
delay += time.Nanosecond
|
||||
// Add Millisecond to ensure that parent process time will be after its child.
|
||||
delay += time.Millisecond
|
||||
err := dag.addDelayedBlock(block, delay)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
@@ -221,7 +231,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
|
||||
// The number K*2 was chosen since in peace times anticone is limited to K blocks,
|
||||
// while some red block can make it a bit bigger, but much more than that indicates
|
||||
// there might be some problem with the netsync process.
|
||||
if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 {
|
||||
if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.Params.K*2 {
|
||||
log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
|
||||
} else {
|
||||
log.Infof("Adding orphan block %s", blockHash)
|
||||
@@ -253,6 +263,8 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
|
||||
}
|
||||
}
|
||||
|
||||
dag.addBlockProcessingTimestamp()
|
||||
|
||||
log.Debugf("Accepted block %s", blockHash)
|
||||
|
||||
return false, false, nil
|
||||
@@ -264,7 +276,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
|
||||
for _, parentHash := range parentHashes {
|
||||
if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
|
||||
isDelayed = true
|
||||
parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
|
||||
parentDelay := delayedParent.processTime.Sub(dag.Now())
|
||||
if parentDelay > delay {
|
||||
delay = parentDelay
|
||||
}
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
func TestProcessOrphans(t *testing.T) {
|
||||
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -63,8 +64,8 @@ func TestProcessOrphans(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make sure that the child block had been rejected
|
||||
node := dag.index.LookupNode(childBlock.Hash())
|
||||
if node == nil {
|
||||
node, ok := dag.index.LookupNode(childBlock.Hash())
|
||||
if !ok {
|
||||
t.Fatalf("TestProcessOrphans: child block missing from block index")
|
||||
}
|
||||
if !dag.index.NodeStatus(node).KnownInvalid() {
|
||||
@@ -72,45 +73,35 @@ func TestProcessOrphans(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type fakeTimeSource struct {
|
||||
time time.Time
|
||||
}
|
||||
|
||||
func (fts *fakeTimeSource) AdjustedTime() time.Time {
|
||||
return fts.time
|
||||
}
|
||||
|
||||
func (fts *fakeTimeSource) AddTimeSample(_ string, _ time.Time) {
|
||||
}
|
||||
|
||||
func (fts *fakeTimeSource) Offset() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func TestProcessDelayedBlocks(t *testing.T) {
|
||||
// We use dag1 so we can build the test blocks with the proper
|
||||
// block header (UTXO commitment, acceptedIDMerkleroot, etc), and
|
||||
// then we use dag2 for the actual test.
|
||||
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
|
||||
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
isDAG1Open := true
|
||||
defer func() {
|
||||
if isDAG1Open {
|
||||
teardownFunc()
|
||||
}
|
||||
}()
|
||||
|
||||
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
|
||||
initialTime := dag1.Params.GenesisBlock.Header.Timestamp
|
||||
// Here we use a fake time source that returns a timestamp
|
||||
// one hour into the future to make delayedBlock artificially
|
||||
// valid.
|
||||
dag1.timeSource = &fakeTimeSource{initialTime.Add(time.Hour)}
|
||||
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
|
||||
|
||||
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
|
||||
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.Params.GenesisBlock.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
|
||||
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance+5) * time.Second
|
||||
blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance)*dag1.Params.TargetTimePerBlock + 5*time.Second
|
||||
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
|
||||
|
||||
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
|
||||
@@ -131,18 +122,21 @@ func TestProcessDelayedBlocks(t *testing.T) {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
|
||||
teardownFunc()
|
||||
isDAG1Open = false
|
||||
|
||||
// Here the actual test begins. We add a delayed block and
|
||||
// its child and check that they are not added to the DAG,
|
||||
// and check that they're added only if we add a new block
|
||||
// after the delayed block timestamp is valid.
|
||||
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
|
||||
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc2()
|
||||
dag2.timeSource = &fakeTimeSource{initialTime}
|
||||
dag2.timeSource = newFakeTimeSource(initialTime)
|
||||
|
||||
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
|
||||
if err != nil {
|
||||
@@ -184,7 +178,7 @@ func TestProcessDelayedBlocks(t *testing.T) {
|
||||
t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
|
||||
}
|
||||
|
||||
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
|
||||
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
@@ -209,10 +203,16 @@ func TestProcessDelayedBlocks(t *testing.T) {
|
||||
}
|
||||
|
||||
// We advance the clock to the point where delayedBlock timestamp is valid.
|
||||
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - int64(dag2.TimestampDeviationTolerance) - dag2.AdjustedTime().Unix() + 1
|
||||
dag2.timeSource = &fakeTimeSource{initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second)}
|
||||
deviationTolerance := time.Duration(dag2.TimestampDeviationTolerance) * dag2.Params.TargetTimePerBlock
|
||||
timeUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.
|
||||
Add(-deviationTolerance).
|
||||
Sub(dag2.Now()) +
|
||||
time.Second
|
||||
dag2.timeSource = newFakeTimeSource(initialTime.Add(timeUntilDelayedBlockIsValid))
|
||||
|
||||
blockAfterDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
|
||||
blockAfterDelay, err := PrepareBlockForTest(dag2,
|
||||
[]*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()},
|
||||
nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,6 +1,8 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -11,19 +13,20 @@ func TestAddChild(t *testing.T) {
|
||||
// root -> a -> b -> c...
|
||||
// Create the root node of a new reachability tree
|
||||
root := newReachabilityTreeNode(&blockNode{})
|
||||
root.setInterval(newReachabilityInterval(1, 100))
|
||||
root.interval = newReachabilityInterval(1, 100)
|
||||
|
||||
// Add a chain of child nodes just before a reindex occurs (2^6=64 < 100)
|
||||
currentTip := root
|
||||
for i := 0; i < 6; i++ {
|
||||
node := newReachabilityTreeNode(&blockNode{})
|
||||
modifiedNodes, err := currentTip.addChild(node)
|
||||
modifiedNodes := newModifiedTreeNodes()
|
||||
err := currentTip.addChild(node, root, modifiedNodes)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAddChild: addChild failed: %s", err)
|
||||
}
|
||||
|
||||
// Expect only the node and its parent to be affected
|
||||
expectedModifiedNodes := []*reachabilityTreeNode{currentTip, node}
|
||||
expectedModifiedNodes := newModifiedTreeNodes(currentTip, node)
|
||||
if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
|
||||
t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
|
||||
"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
|
||||
@@ -34,7 +37,8 @@ func TestAddChild(t *testing.T) {
|
||||
|
||||
// Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128)
|
||||
lastChild := newReachabilityTreeNode(&blockNode{})
|
||||
modifiedNodes, err := currentTip.addChild(lastChild)
|
||||
modifiedNodes := newModifiedTreeNodes()
|
||||
err := currentTip.addChild(lastChild, root, modifiedNodes)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAddChild: addChild failed: %s", err)
|
||||
}
|
||||
@@ -45,19 +49,23 @@ func TestAddChild(t *testing.T) {
|
||||
t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
|
||||
}
|
||||
|
||||
// Expect the tip to have an interval of 1 and remaining interval of 0
|
||||
// Expect the tip to have an interval of 1 and remaining interval of 0 both before and after
|
||||
tipInterval := lastChild.interval.size()
|
||||
if tipInterval != 1 {
|
||||
t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipInterval)
|
||||
}
|
||||
tipRemainingInterval := lastChild.remainingInterval.size()
|
||||
if tipRemainingInterval != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected tip interval size: want: 0, got: %d", tipRemainingInterval)
|
||||
tipRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
|
||||
if tipRemainingIntervalBefore != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected tip interval before size: want: 0, got: %d", tipRemainingIntervalBefore)
|
||||
}
|
||||
tipRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
|
||||
if tipRemainingIntervalAfter != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected tip interval after size: want: 0, got: %d", tipRemainingIntervalAfter)
|
||||
}
|
||||
|
||||
// Expect all nodes to be descendant nodes of root
|
||||
currentNode := currentTip
|
||||
for currentNode != nil {
|
||||
for currentNode != root {
|
||||
if !root.isAncestorOf(currentNode) {
|
||||
t.Fatalf("TestAddChild: currentNode is not a descendant of root")
|
||||
}
|
||||
@@ -68,19 +76,20 @@ func TestAddChild(t *testing.T) {
|
||||
// root -> a, b, c...
|
||||
// Create the root node of a new reachability tree
|
||||
root = newReachabilityTreeNode(&blockNode{})
|
||||
root.setInterval(newReachabilityInterval(1, 100))
|
||||
root.interval = newReachabilityInterval(1, 100)
|
||||
|
||||
// Add child nodes to root just before a reindex occurs (2^6=64 < 100)
|
||||
childNodes := make([]*reachabilityTreeNode, 6)
|
||||
for i := 0; i < len(childNodes); i++ {
|
||||
childNodes[i] = newReachabilityTreeNode(&blockNode{})
|
||||
modifiedNodes, err := root.addChild(childNodes[i])
|
||||
modifiedNodes := newModifiedTreeNodes()
|
||||
err := root.addChild(childNodes[i], root, modifiedNodes)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAddChild: addChild failed: %s", err)
|
||||
}
|
||||
|
||||
// Expect only the node and the root to be affected
|
||||
expectedModifiedNodes := []*reachabilityTreeNode{root, childNodes[i]}
|
||||
expectedModifiedNodes := newModifiedTreeNodes(root, childNodes[i])
|
||||
if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
|
||||
t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
|
||||
"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
|
||||
@@ -89,7 +98,8 @@ func TestAddChild(t *testing.T) {
|
||||
|
||||
// Add another node to the root to trigger a reindex (100 < 2^7=128)
|
||||
lastChild = newReachabilityTreeNode(&blockNode{})
|
||||
modifiedNodes, err = root.addChild(lastChild)
|
||||
modifiedNodes = newModifiedTreeNodes()
|
||||
err = root.addChild(lastChild, root, modifiedNodes)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAddChild: addChild failed: %s", err)
|
||||
}
|
||||
@@ -100,14 +110,18 @@ func TestAddChild(t *testing.T) {
|
||||
t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
|
||||
}
|
||||
|
||||
// Expect the last-added child to have an interval of 1 and remaining interval of 0
|
||||
// Expect the last-added child to have an interval of 1 and remaining interval of 0 both before and after
|
||||
lastChildInterval := lastChild.interval.size()
|
||||
if lastChildInterval != 1 {
|
||||
t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", lastChildInterval)
|
||||
}
|
||||
lastChildRemainingInterval := lastChild.remainingInterval.size()
|
||||
if lastChildRemainingInterval != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 0, got: %d", lastChildRemainingInterval)
|
||||
lastChildRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
|
||||
if lastChildRemainingIntervalBefore != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected lastChild interval before size: want: 0, got: %d", lastChildRemainingIntervalBefore)
|
||||
}
|
||||
lastChildRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
|
||||
if lastChildRemainingIntervalAfter != 0 {
|
||||
t.Fatalf("TestAddChild: unexpected lastChild interval after size: want: 0, got: %d", lastChildRemainingIntervalAfter)
|
||||
}
|
||||
|
||||
// Expect all nodes to be descendant nodes of root
|
||||
@@ -118,6 +132,91 @@ func TestAddChild(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
|
||||
root := newReachabilityTreeNode(&blockNode{})
|
||||
currentTip := root
|
||||
const numberOfDescendants = 6
|
||||
descendants := make([]*reachabilityTreeNode, numberOfDescendants)
|
||||
for i := 0; i < numberOfDescendants; i++ {
|
||||
node := newReachabilityTreeNode(&blockNode{})
|
||||
err := currentTip.addChild(node, root, newModifiedTreeNodes())
|
||||
if err != nil {
|
||||
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err)
|
||||
}
|
||||
descendants[i] = node
|
||||
currentTip = node
|
||||
}
|
||||
|
||||
// Expect all descendants to be in the future of root
|
||||
for _, node := range descendants {
|
||||
if !root.isAncestorOf(node) {
|
||||
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: node is not a descendant of root")
|
||||
}
|
||||
}
|
||||
|
||||
if !root.isAncestorOf(root) {
|
||||
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntervalContains(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
this, other *reachabilityInterval
|
||||
thisContainsOther bool
|
||||
}{
|
||||
{
|
||||
name: "this == other",
|
||||
this: newReachabilityInterval(10, 100),
|
||||
other: newReachabilityInterval(10, 100),
|
||||
thisContainsOther: true,
|
||||
},
|
||||
{
|
||||
name: "this.start == other.start && this.end < other.end",
|
||||
this: newReachabilityInterval(10, 90),
|
||||
other: newReachabilityInterval(10, 100),
|
||||
thisContainsOther: false,
|
||||
},
|
||||
{
|
||||
name: "this.start == other.start && this.end > other.end",
|
||||
this: newReachabilityInterval(10, 100),
|
||||
other: newReachabilityInterval(10, 90),
|
||||
thisContainsOther: true,
|
||||
},
|
||||
{
|
||||
name: "this.start > other.start && this.end == other.end",
|
||||
this: newReachabilityInterval(20, 100),
|
||||
other: newReachabilityInterval(10, 100),
|
||||
thisContainsOther: false,
|
||||
},
|
||||
{
|
||||
name: "this.start < other.start && this.end == other.end",
|
||||
this: newReachabilityInterval(10, 100),
|
||||
other: newReachabilityInterval(20, 100),
|
||||
thisContainsOther: true,
|
||||
},
|
||||
{
|
||||
name: "this.start > other.start && this.end < other.end",
|
||||
this: newReachabilityInterval(20, 90),
|
||||
other: newReachabilityInterval(10, 100),
|
||||
thisContainsOther: false,
|
||||
},
|
||||
{
|
||||
name: "this.start < other.start && this.end > other.end",
|
||||
this: newReachabilityInterval(10, 100),
|
||||
other: newReachabilityInterval(20, 90),
|
||||
thisContainsOther: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
if thisContainsOther := test.this.contains(test.other); thisContainsOther != test.thisContainsOther {
|
||||
t.Errorf("test.this.contains(test.other) is expected to be %t but got %t",
|
||||
test.thisContainsOther, thisContainsOther)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitFraction(t *testing.T) {
|
||||
tests := []struct {
|
||||
interval *reachabilityInterval
|
||||
@@ -346,140 +445,140 @@ func TestSplitWithExponentialBias(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsInFuture(t *testing.T) {
|
||||
blocks := futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
func TestHasAncestorOf(t *testing.T) {
|
||||
treeNodes := futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(2, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
block *futureCoveringBlock
|
||||
treeNode *reachabilityTreeNode
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}},
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)},
|
||||
expectedResult: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
result := blocks.isInFuture(test.block)
|
||||
result := treeNodes.hasAncestorOf(test.treeNode)
|
||||
if result != test.expectedResult {
|
||||
t.Errorf("TestIsInFuture: unexpected result in test #%d. Want: %t, got: %t",
|
||||
t.Errorf("TestHasAncestorOf: unexpected result in test #%d. Want: %t, got: %t",
|
||||
i, test.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertBlock(t *testing.T) {
|
||||
blocks := futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
func TestInsertNode(t *testing.T) {
|
||||
treeNodes := futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
toInsert []*futureCoveringBlock
|
||||
expectedResult futureCoveringBlockSet
|
||||
toInsert []*reachabilityTreeNode
|
||||
expectedResult futureCoveringTreeNodeSet
|
||||
}{
|
||||
{
|
||||
toInsert: []*futureCoveringBlock{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
|
||||
toInsert: []*reachabilityTreeNode{
|
||||
{interval: newReachabilityInterval(5, 7)},
|
||||
},
|
||||
expectedResult: futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
expectedResult: futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
},
|
||||
},
|
||||
{
|
||||
toInsert: []*futureCoveringBlock{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
|
||||
toInsert: []*reachabilityTreeNode{
|
||||
{interval: newReachabilityInterval(65, 78)},
|
||||
},
|
||||
expectedResult: futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
expectedResult: futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(65, 78)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
},
|
||||
},
|
||||
{
|
||||
toInsert: []*futureCoveringBlock{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
|
||||
toInsert: []*reachabilityTreeNode{
|
||||
{interval: newReachabilityInterval(88, 97)},
|
||||
},
|
||||
expectedResult: futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
expectedResult: futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
},
|
||||
},
|
||||
{
|
||||
toInsert: []*futureCoveringBlock{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
|
||||
toInsert: []*reachabilityTreeNode{
|
||||
{interval: newReachabilityInterval(88, 97)},
|
||||
{interval: newReachabilityInterval(3000, 3010)},
|
||||
},
|
||||
expectedResult: futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
|
||||
expectedResult: futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
// Create a clone of blocks so that we have a clean start for every test
|
||||
blocksClone := make(futureCoveringBlockSet, len(blocks))
|
||||
for i, block := range blocks {
|
||||
blocksClone[i] = block
|
||||
// Create a clone of treeNodes so that we have a clean start for every test
|
||||
treeNodesClone := make(futureCoveringTreeNodeSet, len(treeNodes))
|
||||
for i, treeNode := range treeNodes {
|
||||
treeNodesClone[i] = treeNode
|
||||
}
|
||||
|
||||
for _, block := range test.toInsert {
|
||||
blocksClone.insertBlock(block)
|
||||
for _, treeNode := range test.toInsert {
|
||||
treeNodesClone.insertNode(treeNode)
|
||||
}
|
||||
if !reflect.DeepEqual(blocksClone, test.expectedResult) {
|
||||
t.Errorf("TestInsertBlock: unexpected result in test #%d. Want: %s, got: %s",
|
||||
i, test.expectedResult, blocksClone)
|
||||
if !reflect.DeepEqual(treeNodesClone, test.expectedResult) {
|
||||
t.Errorf("TestInsertNode: unexpected result in test #%d. Want: %s, got: %s",
|
||||
i, test.expectedResult, treeNodesClone)
|
||||
}
|
||||
}
|
||||
}
|
||||
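The expected results above show that insertion keeps the covering set ordered by interval start. A minimal sketch of such an ordered insert, assuming a plain binary search over interval starts (this helper is illustrative only and is not the package's actual insertNode implementation):

// insertSorted keeps a futureCoveringTreeNodeSet-like slice sorted by
// interval start. Sketch only; the real insertNode may differ.
func insertSorted(set []*reachabilityTreeNode, node *reachabilityTreeNode) []*reachabilityTreeNode {
	// Binary search for the first element whose interval starts at or
	// after the new node's interval start.
	low, high := 0, len(set)
	for low < high {
		mid := (low + high) / 2
		if set[mid].interval.start < node.interval.start {
			low = mid + 1
		} else {
			high = mid
		}
	}
	// Shift the tail right by one slot and place the new node.
	set = append(set, nil)
	copy(set[low+1:], set[low:])
	set[low] = node
	return set
}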
@@ -580,14 +679,14 @@ func TestSplitWithExponentialBiasErrors(t *testing.T) {
|
||||
func TestReindexIntervalErrors(t *testing.T) {
|
||||
// Create a treeNode and give it size = 100
|
||||
treeNode := newReachabilityTreeNode(&blockNode{})
|
||||
treeNode.setInterval(newReachabilityInterval(0, 99))
|
||||
treeNode.interval = newReachabilityInterval(0, 99)
|
||||
|
||||
// Add a chain of 100 child treeNodes to treeNode
|
||||
var err error
|
||||
currentTreeNode := treeNode
|
||||
for i := 0; i < 100; i++ {
|
||||
childTreeNode := newReachabilityTreeNode(&blockNode{})
|
||||
_, err = currentTreeNode.addChild(childTreeNode)
|
||||
err = currentTreeNode.addChild(childTreeNode, treeNode, newModifiedTreeNodes())
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
@@ -609,33 +708,70 @@ func TestReindexIntervalErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFutureCoveringBlockSetString(t *testing.T) {
|
||||
treeNodeA := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeA.setInterval(newReachabilityInterval(123, 456))
|
||||
treeNodeB := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeB.setInterval(newReachabilityInterval(457, 789))
|
||||
futureCoveringSet := futureCoveringBlockSet{
|
||||
&futureCoveringBlock{treeNode: treeNodeA},
|
||||
&futureCoveringBlock{treeNode: treeNodeB},
|
||||
func BenchmarkReindexInterval(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
root := newReachabilityTreeNode(&blockNode{})
|
||||
|
||||
const subTreeSize = 70000
|
||||
// We set the interval of the root to subTreeSize*2 because
|
||||
// its first child gets half of the interval, so a reindex
|
||||
// from the root should happen after adding subTreeSize
|
||||
// nodes.
|
||||
root.interval = newReachabilityInterval(0, subTreeSize*2)
|
||||
|
||||
currentTreeNode := root
|
||||
for i := 0; i < subTreeSize; i++ {
|
||||
childTreeNode := newReachabilityTreeNode(&blockNode{})
|
||||
err := currentTreeNode.addChild(childTreeNode, root, newModifiedTreeNodes())
|
||||
if err != nil {
|
||||
b.Fatalf("addChild: %s", err)
|
||||
}
|
||||
|
||||
currentTreeNode = childTreeNode
|
||||
}
|
||||
|
||||
originalRemainingInterval := *root.remainingIntervalAfter()
|
||||
// After we added subTreeSize nodes, adding the next
|
||||
// node should lead to a reindex from root.
|
||||
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
|
||||
b.StartTimer()
|
||||
err := currentTreeNode.addChild(fullReindexTriggeringNode, root, newModifiedTreeNodes())
|
||||
b.StopTimer()
|
||||
if err != nil {
|
||||
b.Fatalf("addChild: %s", err)
|
||||
}
|
||||
|
||||
if *root.remainingIntervalAfter() == originalRemainingInterval {
|
||||
b.Fatal("Expected a reindex from root, but it didn't happen")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFutureCoveringTreeNodeSetString(t *testing.T) {
|
||||
treeNodeA := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeA.interval = newReachabilityInterval(123, 456)
|
||||
treeNodeB := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeB.interval = newReachabilityInterval(457, 789)
|
||||
futureCoveringSet := futureCoveringTreeNodeSet{treeNodeA, treeNodeB}
|
||||
|
||||
str := futureCoveringSet.String()
|
||||
expectedStr := "[123,456][457,789]"
|
||||
if str != expectedStr {
|
||||
t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+
|
||||
t.Fatalf("TestFutureCoveringTreeNodeSetString: unexpected "+
|
||||
"string. Want: %s, got: %s", expectedStr, str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReachabilityTreeNodeString(t *testing.T) {
|
||||
treeNodeA := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeA.setInterval(newReachabilityInterval(100, 199))
|
||||
treeNodeA.interval = newReachabilityInterval(100, 199)
|
||||
treeNodeB1 := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeB1.setInterval(newReachabilityInterval(100, 150))
|
||||
treeNodeB1.interval = newReachabilityInterval(100, 150)
|
||||
treeNodeB2 := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeB2.setInterval(newReachabilityInterval(150, 199))
|
||||
treeNodeB2.interval = newReachabilityInterval(150, 199)
|
||||
treeNodeC := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeC.setInterval(newReachabilityInterval(100, 149))
|
||||
treeNodeC.interval = newReachabilityInterval(100, 149)
|
||||
treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
|
||||
treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}
|
||||
|
||||
@@ -646,3 +782,268 @@ func TestReachabilityTreeNodeString(t *testing.T) {
|
||||
"string. Want: %s, got: %s", expectedStr, str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsInPast(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestIsInPast", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestIsInPast: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Add a chain of two blocks above the genesis. This will be the
|
||||
// selected parent chain.
|
||||
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
|
||||
|
||||
// Add another block above the genesis
|
||||
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
nodeC, ok := dag.index.LookupNode(blockC.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestIsInPast: block C is not in the block index")
|
||||
}
|
||||
|
||||
// Add a block whose parents are the two tips
|
||||
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, nil)
|
||||
nodeD, ok := dag.index.LookupNode(blockD.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestIsInPast: block C is not in the block index")
|
||||
}
|
||||
|
||||
// Make sure that node C is in the past of node D
|
||||
isInPast, err := dag.reachabilityTree.isInPast(nodeC, nodeD)
|
||||
if err != nil {
|
||||
t.Fatalf("TestIsInPast: isInPast unexpectedly failed: %s", err)
|
||||
}
|
||||
if !isInPast {
|
||||
t.Fatalf("TestIsInPast: node C is unexpectedly not the past of node D")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot",
|
||||
true, Config{DAGParams: &dagconfig.SimnetParams})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Set the reindex window to a low number to make this test run fast
|
||||
originalReachabilityReindexWindow := reachabilityReindexWindow
|
||||
reachabilityReindexWindow = 10
|
||||
defer func() {
|
||||
reachabilityReindexWindow = originalReachabilityReindexWindow
|
||||
}()
|
||||
|
||||
// Add a block on top of the genesis block
|
||||
chainRootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
|
||||
// Add a chain of reachabilityReindexWindow blocks above chainRootBlock.
|
||||
// This should move the reindex root
|
||||
chainRootBlockTipHash := chainRootBlock.BlockHash()
|
||||
for i := uint64(0); i < reachabilityReindexWindow; i++ {
|
||||
chainBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chainRootBlockTipHash}, nil)
|
||||
chainRootBlockTipHash = chainBlock.BlockHash()
|
||||
}
|
||||
|
||||
// Add another block over genesis
|
||||
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
}
|
||||
|
||||
func TestUpdateReindexRoot(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Set the reindex window to a low number to make this test run fast
|
||||
originalReachabilityReindexWindow := reachabilityReindexWindow
|
||||
reachabilityReindexWindow = 10
|
||||
defer func() {
|
||||
reachabilityReindexWindow = originalReachabilityReindexWindow
|
||||
}()
|
||||
|
||||
// Add two blocks on top of the genesis block
|
||||
chain1RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
chain2RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
|
||||
// Add a chain of reachabilityReindexWindow - 1 blocks above chain1RootBlock and
|
||||
// chain2RootBlock, respectively. This should not move the reindex root
|
||||
chain1RootBlockTipHash := chain1RootBlock.BlockHash()
|
||||
chain2RootBlockTipHash := chain2RootBlock.BlockHash()
|
||||
genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
|
||||
chain1Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
|
||||
chain1RootBlockTipHash = chain1Block.BlockHash()
|
||||
|
||||
chain2Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain2RootBlockTipHash}, nil)
|
||||
chain2RootBlockTipHash = chain2Block.BlockHash()
|
||||
|
||||
if dag.reachabilityTree.reindexRoot != genesisTreeNode {
|
||||
t.Fatalf("reindex root unexpectedly moved")
|
||||
}
|
||||
}
|
||||
|
||||
// Add another block over chain1. This will move the reindex root to chain1RootBlock
|
||||
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
|
||||
|
||||
// Make sure that chain1RootBlock is now the reindex root
|
||||
chain1RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain1RootBlock.BlockHash())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
if dag.reachabilityTree.reindexRoot != chain1RootTreeNode {
|
||||
t.Fatalf("chain1RootBlock is not the reindex root after reindex")
|
||||
}
|
||||
|
||||
// Make sure that tight intervals have been applied to chain2. Since
|
||||
// we added reachabilityReindexWindow-1 blocks to chain2, the size
|
||||
// of the interval at its root should be equal to reachabilityReindexWindow
|
||||
chain2RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain2RootBlock.BlockHash())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
if chain2RootTreeNode.interval.size() != reachabilityReindexWindow {
|
||||
t.Fatalf("got unexpected chain2RootNode interval. Want: %d, got: %d",
|
||||
reachabilityReindexWindow, chain2RootTreeNode.interval.size())
|
||||
}
|
||||
|
||||
// Make sure that the rest of the interval has been allocated to
|
||||
// chain1RootNode, minus slack from both sides
|
||||
expectedChain1RootIntervalSize := genesisTreeNode.interval.size() - 1 -
|
||||
chain2RootTreeNode.interval.size() - 2*reachabilityReindexSlack
|
||||
if chain1RootTreeNode.interval.size() != expectedChain1RootIntervalSize {
|
||||
t.Fatalf("got unexpected chain1RootNode interval. Want: %d, got: %d",
|
||||
expectedChain1RootIntervalSize, chain1RootTreeNode.interval.size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestReindexIntervalsEarlierThanReindexRoot", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Set the reindex window and slack to low numbers to make this test
|
||||
// run fast
|
||||
originalReachabilityReindexWindow := reachabilityReindexWindow
|
||||
originalReachabilityReindexSlack := reachabilityReindexSlack
|
||||
reachabilityReindexWindow = 10
|
||||
reachabilityReindexSlack = 5
|
||||
defer func() {
|
||||
reachabilityReindexWindow = originalReachabilityReindexWindow
|
||||
reachabilityReindexSlack = originalReachabilityReindexSlack
|
||||
}()
|
||||
|
||||
// Add three children to the genesis: leftBlock, centerBlock, rightBlock
|
||||
leftBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
centerBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
rightBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
|
||||
// Add a chain of reachabilityReindexWindow blocks above centerBlock.
|
||||
// This will move the reindex root to centerBlock
|
||||
centerTipHash := centerBlock.BlockHash()
|
||||
for i := uint64(0); i < reachabilityReindexWindow; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{centerTipHash}, nil)
|
||||
centerTipHash = block.BlockHash()
|
||||
}
|
||||
|
||||
// Make sure that centerBlock is now the reindex root
|
||||
centerTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(centerBlock.BlockHash())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
if dag.reachabilityTree.reindexRoot != centerTreeNode {
|
||||
t.Fatalf("centerBlock is not the reindex root after reindex")
|
||||
}
|
||||
|
||||
// Get the current interval for leftBlock. The reindex should have
|
||||
// resulted in a tight interval there
|
||||
leftTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(leftBlock.BlockHash())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
if leftTreeNode.interval.size() != 1 {
|
||||
t.Fatalf("leftBlock interval not tight after reindex")
|
||||
}
|
||||
|
||||
// Get the current interval for rightBlock. The reindex should have
|
||||
// resulted in a tight interval there
|
||||
rightTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(rightBlock.BlockHash())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
if rightTreeNode.interval.size() != 1 {
|
||||
t.Fatalf("rightBlock interval not tight after reindex")
|
||||
}
|
||||
|
||||
// Get the current interval for centerBlock. Its interval should be:
|
||||
// genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
|
||||
genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get tree node: %s", err)
|
||||
}
|
||||
expectedCenterInterval := genesisTreeNode.interval.size() - 1 -
|
||||
leftTreeNode.interval.size() - reachabilityReindexSlack -
|
||||
rightTreeNode.interval.size() - reachabilityReindexSlack
|
||||
if centerTreeNode.interval.size() != expectedCenterInterval {
|
||||
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
|
||||
expectedCenterInterval, centerTreeNode.interval.size())
|
||||
}
|
||||
|
||||
// Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
|
||||
// Each addition will trigger a lower-than-reindex-root reindex. We
|
||||
// expect the centerInterval to shrink by 1 each time, but its child
|
||||
// to remain unaffected
|
||||
treeChildOfCenterBlock := centerTreeNode.children[0]
|
||||
treeChildOfCenterBlockOriginalIntervalSize := treeChildOfCenterBlock.interval.size()
|
||||
leftTipHash := leftBlock.BlockHash()
|
||||
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{leftTipHash}, nil)
|
||||
leftTipHash = block.BlockHash()
|
||||
|
||||
expectedCenterInterval--
|
||||
if centerTreeNode.interval.size() != expectedCenterInterval {
|
||||
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
|
||||
expectedCenterInterval, centerTreeNode.interval.size())
|
||||
}
|
||||
|
||||
if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
|
||||
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
|
||||
}
|
||||
}
|
||||
|
||||
// Add a chain of reachabilityReindexWindow - 1 blocks above rightBlock.
|
||||
// Each addition will trigger a lower-than-reindex-root reindex. We
|
||||
// expect the centerInterval to shrink by 1 each time, but its child
|
||||
// to remain unaffected
|
||||
rightTipHash := rightBlock.BlockHash()
|
||||
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{rightTipHash}, nil)
|
||||
rightTipHash = block.BlockHash()
|
||||
|
||||
expectedCenterInterval--
|
||||
if centerTreeNode.interval.size() != expectedCenterInterval {
|
||||
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
|
||||
expectedCenterInterval, centerTreeNode.interval.size())
|
||||
}
|
||||
|
||||
if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
|
||||
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
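Spelled out with this test's constants (reachabilityReindexWindow = 10, reachabilityReindexSlack = 5, and tight size-1 intervals on leftBlock and rightBlock), the expected center interval is just the following arithmetic; G stands for the genesis interval size, whose exact value is not assumed here:

// Worked arithmetic for the expectedCenterInterval formula above:
//   expectedCenterInterval = G - 1 - leftSize - slack - rightSize - slack
//                          = G - 1 - 1 - 5 - 1 - 5
//                          = G - 13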
@@ -3,15 +3,16 @@ package blockdag
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type reachabilityData struct {
|
||||
treeNode *reachabilityTreeNode
|
||||
futureCoveringSet futureCoveringBlockSet
|
||||
futureCoveringSet futureCoveringTreeNodeSet
|
||||
}
|
||||
|
||||
type reachabilityStore struct {
|
||||
@@ -40,11 +41,11 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
|
||||
store.setBlockAsDirty(node.hash)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
|
||||
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringTreeNodeSet) error {
|
||||
// load the reachability data from DB to store.loaded
|
||||
_, exists := store.reachabilityDataByHash(node.hash)
|
||||
if !exists {
|
||||
return reachabilityNotFoundError(node)
|
||||
return reachabilityNotFoundError(node.hash)
|
||||
}
|
||||
|
||||
store.loaded[*node.hash].futureCoveringSet = futureCoveringSet
|
||||
@@ -56,22 +57,26 @@ func (store *reachabilityStore) setBlockAsDirty(blockHash *daghash.Hash) {
|
||||
store.dirty[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func reachabilityNotFoundError(node *blockNode) error {
|
||||
return errors.Errorf("Couldn't find reachability data for block %s", node.hash)
|
||||
func reachabilityNotFoundError(hash *daghash.Hash) error {
|
||||
return errors.Errorf("couldn't find reachability data for block %s", hash)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
|
||||
reachabilityData, exists := store.reachabilityDataByHash(node.hash)
|
||||
func (store *reachabilityStore) treeNodeByBlockHash(hash *daghash.Hash) (*reachabilityTreeNode, error) {
|
||||
reachabilityData, exists := store.reachabilityDataByHash(hash)
|
||||
if !exists {
|
||||
return nil, reachabilityNotFoundError(node)
|
||||
return nil, reachabilityNotFoundError(hash)
|
||||
}
|
||||
return reachabilityData.treeNode, nil
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringBlockSet, error) {
|
||||
func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
|
||||
return store.treeNodeByBlockHash(node.hash)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringTreeNodeSet, error) {
|
||||
reachabilityData, exists := store.reachabilityDataByHash(node.hash)
|
||||
if !exists {
|
||||
return nil, reachabilityNotFoundError(node)
|
||||
return nil, reachabilityNotFoundError(node.hash)
|
||||
}
|
||||
return reachabilityData.futureCoveringSet, nil
|
||||
}
|
||||
@@ -82,7 +87,7 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty reachability data to the database.
|
||||
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
|
||||
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
if len(store.dirty) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -90,7 +95,7 @@ func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
|
||||
for hash := range store.dirty {
|
||||
hash := hash // Copy hash to a new variable to avoid passing the same pointer
|
||||
reachabilityData := store.loaded[hash]
|
||||
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
|
||||
err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,22 +107,25 @@ func (store *reachabilityStore) clearDirtyEntries() {
|
||||
store.dirty = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) init(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
|
||||
|
||||
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
|
||||
// TODO: (Stas) This is a quick and dirty hack.
|
||||
// We iterate over the entire bucket twice:
|
||||
// * First, populate the loaded set with all entries
|
||||
// * Second, connect the parent/children pointers in each entry
|
||||
// with other nodes, which are now guaranteed to exist
|
||||
cursor := bucket.Cursor()
|
||||
cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.initReachabilityData(cursor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cursor = bucket.Cursor()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.loadReachabilityDataFromCursor(cursor)
|
||||
if err != nil {
|
||||
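The TODO above describes a two-pass load: the first pass deserializes every entry into memory, and only the second pass resolves parent/children hashes into pointers, which is safe because by then every referenced entry already exists. A generic, self-contained sketch of that pattern (the entry type and loadAll function are hypothetical, not the store's real API):

type entry struct {
	parentKey string
	childKeys []string
	parent    *entry
	children  []*entry
}

// loadAll materializes every record first, then wires up pointers.
func loadAll(records map[string]entry) map[string]*entry {
	loaded := make(map[string]*entry, len(records))
	// Pass 1: create every entry so that all of them exist in memory.
	for key := range records {
		e := records[key]
		loaded[key] = &e
	}
	// Pass 2: connect parent/children pointers; every lookup now
	// succeeds because pass 1 already populated the map.
	for _, e := range loaded {
		e.parent = loaded[e.parentKey]
		for _, childKey := range e.childKeys {
			e.children = append(e.children, loaded[childKey])
		}
	}
	return loaded
}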
@@ -128,7 +136,12 @@ func (store *reachabilityStore) init(dbTx database.Tx) error {
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := daghash.NewHash(key.Suffix())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,7 +154,12 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := daghash.NewHash(key.Suffix())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -151,26 +169,34 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
|
||||
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
|
||||
}
|
||||
|
||||
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
|
||||
serializedReachabilityData, err := cursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect the treeNode with its blockNode
|
||||
reachabilityData.treeNode.blockNode = store.dag.index.LookupNode(hash)
|
||||
reachabilityData.treeNode.blockNode, ok = store.dag.index.LookupNode(hash)
|
||||
if !ok {
|
||||
return errors.Errorf("block %s does not exist in the DAG", hash)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbStoreReachabilityData stores the reachability data to the database.
|
||||
// storeReachabilityData stores the reachability data to the database.
|
||||
// This overwrites the current entry if there exists one.
|
||||
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
|
||||
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
|
||||
serializedReachabilyData, err := store.serializeReachabilityData(reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilyData)
|
||||
return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilyData)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {
|
||||
@@ -193,32 +219,26 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the remaining interval
|
||||
err = store.serializeReachabilityInterval(w, treeNode.remainingInterval)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the parent
|
||||
// If this is the genesis block, write the zero hash instead
|
||||
parentHash := &daghash.ZeroHash
|
||||
if treeNode.parent != nil {
|
||||
parentHash = treeNode.parent.blockNode.hash
|
||||
}
|
||||
err = wire.WriteElement(w, parentHash)
|
||||
err = domainmessage.WriteElement(w, parentHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the amount of children
|
||||
err = wire.WriteVarInt(w, uint64(len(treeNode.children)))
|
||||
err = domainmessage.WriteVarInt(w, uint64(len(treeNode.children)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the children
|
||||
for _, child := range treeNode.children {
|
||||
err = wire.WriteElement(w, child.blockNode.hash)
|
||||
err = domainmessage.WriteElement(w, child.blockNode.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -229,13 +249,13 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab
|
||||
|
||||
func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, interval *reachabilityInterval) error {
|
||||
// Serialize start
|
||||
err := wire.WriteElement(w, interval.start)
|
||||
err := domainmessage.WriteElement(w, interval.start)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize end
|
||||
err = wire.WriteElement(w, interval.end)
|
||||
err = domainmessage.WriteElement(w, interval.end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -243,16 +263,16 @@ func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, inter
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringBlockSet) error {
|
||||
func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringTreeNodeSet) error {
|
||||
// Serialize the set size
|
||||
err := wire.WriteVarInt(w, uint64(len(futureCoveringSet)))
|
||||
err := domainmessage.WriteVarInt(w, uint64(len(futureCoveringSet)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize each block in the set
|
||||
for _, block := range futureCoveringSet {
|
||||
err = wire.WriteElement(w, block.blockNode.hash)
|
||||
// Serialize each node in the set
|
||||
for _, node := range futureCoveringSet {
|
||||
err = domainmessage.WriteElement(w, node.blockNode.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -289,17 +309,10 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
|
||||
}
|
||||
destination.treeNode.interval = interval
|
||||
|
||||
// Deserialize the remaining interval
|
||||
remainingInterval, err := store.deserializeReachabilityInterval(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
destination.treeNode.remainingInterval = remainingInterval
|
||||
|
||||
// Deserialize the parent
|
||||
// If this is the zero hash, this node is the genesis and as such doesn't have a parent
|
||||
parentHash := &daghash.Hash{}
|
||||
err = wire.ReadElement(r, parentHash)
|
||||
err = domainmessage.ReadElement(r, parentHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -312,7 +325,7 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
|
||||
}
|
||||
|
||||
// Deserialize the amount of children
|
||||
childCount, err := wire.ReadVarInt(r)
|
||||
childCount, err := domainmessage.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -321,7 +334,7 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
|
||||
children := make([]*reachabilityTreeNode, childCount)
|
||||
for i := uint64(0); i < childCount; i++ {
|
||||
childHash := &daghash.Hash{}
|
||||
err = wire.ReadElement(r, childHash)
|
||||
err = domainmessage.ReadElement(r, childHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -341,7 +354,7 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r
|
||||
|
||||
// Deserialize start
|
||||
start := uint64(0)
|
||||
err := wire.ReadElement(r, &start)
|
||||
err := domainmessage.ReadElement(r, &start)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -349,7 +362,7 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r
|
||||
|
||||
// Deserialize end
|
||||
end := uint64(0)
|
||||
err = wire.ReadElement(r, &end)
|
||||
err = domainmessage.ReadElement(r, &end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -360,31 +373,24 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r
|
||||
|
||||
func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destination *reachabilityData) error {
|
||||
// Deserialize the set size
|
||||
setSize, err := wire.ReadVarInt(r)
|
||||
setSize, err := domainmessage.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Deserialize each block in the set
|
||||
futureCoveringSet := make(futureCoveringBlockSet, setSize)
|
||||
futureCoveringSet := make(futureCoveringTreeNodeSet, setSize)
|
||||
for i := uint64(0); i < setSize; i++ {
|
||||
blockHash := &daghash.Hash{}
|
||||
err = wire.ReadElement(r, blockHash)
|
||||
err = domainmessage.ReadElement(r, blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockNode := store.dag.index.LookupNode(blockHash)
|
||||
if blockNode == nil {
|
||||
return errors.Errorf("blockNode not found for hash %s", blockHash)
|
||||
}
|
||||
blockReachabilityData, ok := store.reachabilityDataByHash(blockHash)
|
||||
if !ok {
|
||||
return errors.Errorf("block reachability data not found for hash: %s", blockHash)
|
||||
}
|
||||
futureCoveringSet[i] = &futureCoveringBlock{
|
||||
blockNode: blockNode,
|
||||
treeNode: blockReachabilityData.treeNode,
|
||||
}
|
||||
futureCoveringSet[i] = blockReachabilityData.treeNode
|
||||
}
|
||||
destination.futureCoveringSet = futureCoveringSet
|
||||
|
||||
|
||||
@@ -9,15 +9,15 @@ import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// txValidateItem holds a transaction along with which input to validate.
|
||||
type txValidateItem struct {
|
||||
txInIndex int
|
||||
txIn *wire.TxIn
|
||||
txIn *domainmessage.TxIn
|
||||
tx *util.Tx
|
||||
}
|
||||
|
||||
@@ -126,7 +126,7 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
|
||||
// Start up validation handlers that are used to asynchronously
|
||||
// validate each transaction input.
|
||||
for i := 0; i < maxGoRoutines; i++ {
|
||||
spawn(v.validateHandler)
|
||||
spawn("txValidator.validateHandler", v.validateHandler)
|
||||
}
|
||||
|
||||
// Validate each of the inputs. The quit channel is closed when any
|
||||
@@ -179,11 +179,6 @@ func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscr
|
||||
// ValidateTransactionScripts validates the scripts for the passed transaction
|
||||
// using multiple goroutines.
|
||||
func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
|
||||
// Don't validate coinbase transaction scripts.
|
||||
if tx.IsCoinBase() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect all of the transaction inputs and required information for
|
||||
// validation.
|
||||
txIns := tx.MsgTx().TxIn
|
||||
@@ -213,10 +208,6 @@ func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.T
|
||||
}
|
||||
txValItems := make([]*txValidateItem, 0, numInputs)
|
||||
for _, tx := range transactions {
|
||||
// Skip coinbase transactions.
|
||||
if tx.IsCoinBase() {
|
||||
continue
|
||||
}
|
||||
for txInIdx, txIn := range tx.MsgTx().TxIn {
|
||||
txVI := &txValidateItem{
|
||||
txInIndex: txInIdx,
|
||||
|
||||
@@ -4,32 +4,22 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// SubnetworkStore stores the subnetworks data
|
||||
type SubnetworkStore struct {
|
||||
db database.DB
|
||||
}
|
||||
|
||||
func newSubnetworkStore(db database.DB) *SubnetworkStore {
|
||||
return &SubnetworkStore{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// registerSubnetworks scans a list of transactions, singles out
|
||||
// subnetwork registry transactions, validates them, and registers a new
|
||||
// subnetwork based on it.
|
||||
// This function returns an error if one or more transactions are invalid
|
||||
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
|
||||
subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
|
||||
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
|
||||
subnetworkRegistryTxs := make([]*domainmessage.MsgTx, 0)
|
||||
for _, tx := range txs {
|
||||
msgTx := tx.MsgTx()
|
||||
|
||||
@@ -50,13 +40,13 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
|
||||
exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sNet == nil {
|
||||
if !exists {
|
||||
createdSubnetwork := newSubnetwork(registryTx)
|
||||
err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
|
||||
err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed registering subnetwork"+
|
||||
"for tx '%s': %s", registryTx.TxHash(), err)
|
||||
@@ -70,7 +60,7 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
|
||||
// validateSubnetworkRegistryTransaction makes sure that a given subnetwork registry
|
||||
// transaction is valid. Such a transaction is valid iff:
|
||||
// - Its entire payload is a uint64 (8 bytes)
|
||||
func validateSubnetworkRegistryTransaction(tx *wire.MsgTx) error {
|
||||
func validateSubnetworkRegistryTransaction(tx *domainmessage.MsgTx) error {
|
||||
if len(tx.Payload) != 8 {
|
||||
return ruleError(ErrSubnetworkRegistry, fmt.Sprintf("validation failed: subnetwork registry "+
|
||||
"tx '%s' has an invalid payload", tx.TxHash()))
|
||||
@@ -80,85 +70,58 @@ func validateSubnetworkRegistryTransaction(tx *wire.MsgTx) error {
|
||||
}
|
||||
|
||||
// TxToSubnetworkID creates a subnetwork ID from a subnetwork registry transaction
|
||||
func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
|
||||
func TxToSubnetworkID(tx *domainmessage.MsgTx) (*subnetworkid.SubnetworkID, error) {
|
||||
txHash := tx.TxHash()
|
||||
return subnetworkid.New(util.Hash160(txHash[:]))
|
||||
}
|
||||
|
||||
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
|
||||
// this method returns an error.
|
||||
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
var sNet *subnetwork
|
||||
var err error
|
||||
dbErr := s.db.View(func(dbTx database.Tx) error {
|
||||
sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
|
||||
return nil
|
||||
})
|
||||
if dbErr != nil {
|
||||
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
|
||||
}
|
||||
// fetchSubnetwork returns a registered subnetwork.
|
||||
func (dag *BlockDAG) fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dag.databaseContext, subnetworkID)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sNet, nil
|
||||
subnet, err := deserializeSubnetwork(serializedSubnetwork)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return subnet, nil
|
||||
}
|
||||
|
||||
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
|
||||
// exist this method returns an error.
|
||||
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
|
||||
sNet, err := s.subnetwork(subnetworkID)
|
||||
func (dag *BlockDAG) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
|
||||
sNet, err := dag.fetchSubnetwork(subnetworkID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if sNet == nil {
|
||||
return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
|
||||
}
|
||||
|
||||
return sNet.gasLimit, nil
|
||||
}
|
||||
|
||||
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
|
||||
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
|
||||
// Serialize the subnetwork
|
||||
func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
|
||||
serializedSubnetwork, err := serializeSubnetwork(network)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to serialize sub-netowrk '%s': %s", subnetworkID, err)
|
||||
}
|
||||
|
||||
// Store the subnetwork
|
||||
subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
|
||||
err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to write sub-netowrk '%s': %s", subnetworkID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
|
||||
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
|
||||
serializedSubnetwork := bucket.Get(subnetworkID[:])
|
||||
if serializedSubnetwork == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return deserializeSubnetwork(serializedSubnetwork)
|
||||
return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
|
||||
}
|
||||
|
||||
type subnetwork struct {
|
||||
gasLimit uint64
|
||||
}
|
||||
|
||||
func newSubnetwork(tx *wire.MsgTx) *subnetwork {
|
||||
func newSubnetwork(tx *domainmessage.MsgTx) *subnetwork {
|
||||
return &subnetwork{
|
||||
gasLimit: ExtractGasLimit(tx),
|
||||
}
|
||||
}
|
||||
|
||||
// ExtractGasLimit extracts the gas limit from the transaction payload
|
||||
func ExtractGasLimit(tx *wire.MsgTx) uint64 {
|
||||
func ExtractGasLimit(tx *domainmessage.MsgTx) uint64 {
|
||||
return binary.LittleEndian.Uint64(tx.Payload[:8])
|
||||
}
|
||||
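Since a registry transaction's payload is exactly eight little-endian bytes holding the gas limit, a round trip looks like this (standalone sketch; the 21,000,000 gas limit is a made-up value):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Build the 8-byte little-endian payload of a subnetwork registry tx.
	payload := make([]byte, 8)
	binary.LittleEndian.PutUint64(payload, 21_000_000)

	// Mirrors what ExtractGasLimit does with tx.Payload[:8].
	fmt.Println(binary.LittleEndian.Uint64(payload[:8])) // 21000000
}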
|
||||
|
||||
blockdag/sync_rate.go (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"time"
|
||||
)
|
||||
|
||||
const syncRateWindowDuration = 15 * time.Minute
|
||||
|
||||
// addBlockProcessingTimestamp records the current block processing timestamp in order to measure the recent sync rate.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) addBlockProcessingTimestamp() {
|
||||
now := mstime.Now()
|
||||
dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
|
||||
dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
|
||||
}
|
||||
|
||||
// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration
|
||||
// from dag.recentBlockProcessingTimestamps
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() {
|
||||
dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []mstime.Time {
|
||||
minTime := mstime.Now().Add(-syncRateWindowDuration)
|
||||
windowStartIndex := len(dag.recentBlockProcessingTimestamps)
|
||||
for i, processTime := range dag.recentBlockProcessingTimestamps {
|
||||
if processTime.After(minTime) {
|
||||
windowStartIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
return dag.recentBlockProcessingTimestamps[windowStartIndex:]
|
||||
}
|
||||
|
||||
// syncRate returns the rate of processed
|
||||
// blocks in the last syncRateWindowDuration
|
||||
// window.
|
||||
func (dag *BlockDAG) syncRate() float64 {
|
||||
dag.RLock()
|
||||
defer dag.RUnlock()
|
||||
return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds()
|
||||
}
|
||||
|
||||
// IsSyncRateBelowThreshold checks whether the sync rate
|
||||
// is below the expected threshold.
|
||||
func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
|
||||
if dag.uptime() < syncRateWindowDuration {
|
||||
return false
|
||||
}
|
||||
|
||||
return dag.syncRate() < 1/dag.Params.TargetTimePerBlock.Seconds()*maxDeviation
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) uptime() time.Duration {
|
||||
return mstime.Now().Sub(dag.startTime)
|
||||
}
|
||||
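Putting the pieces above together: syncRate divides the number of blocks processed inside the 15-minute window by the window length in seconds, and IsSyncRateBelowThreshold compares that against (1 / TargetTimePerBlock in seconds) * maxDeviation. A standalone sketch with made-up numbers; the 1-second target block time and 0.05 maxDeviation are assumptions, not values taken from the params:

package main

import (
	"fmt"
	"time"
)

func main() {
	const windowDuration = 15 * time.Minute // syncRateWindowDuration
	blocksInWindow := 900.0                 // hypothetical count of recent timestamps
	targetTimePerBlock := 1 * time.Second   // hypothetical Params.TargetTimePerBlock
	maxDeviation := 0.05                    // hypothetical caller-supplied factor

	syncRate := blocksInWindow / windowDuration.Seconds()        // 1.0 blocks/sec
	threshold := 1 / targetTimePerBlock.Seconds() * maxDeviation // 0.05 blocks/sec

	fmt.Println(syncRate < threshold) // false: the node is keeping up
}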
@@ -5,45 +5,28 @@ package blockdag
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ldb"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
const (
|
||||
// testDbType is the database backend type to use for the tests.
|
||||
testDbType = "ffldb"
|
||||
|
||||
// blockDataNet is the expected network in the test block data.
|
||||
blockDataNet = wire.Mainnet
|
||||
)
|
||||
|
||||
// isSupportedDbType returns whether or not the passed database type is
|
||||
// currently supported.
|
||||
func isSupportedDbType(dbType string) bool {
|
||||
supportedDrivers := database.SupportedDrivers()
|
||||
for _, driver := range supportedDrivers {
|
||||
if dbType == driver {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// FileExists returns whether or not the named file or directory exists.
|
||||
func FileExists(name string) bool {
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
@@ -57,53 +40,66 @@ func FileExists(name string) bool {
|
||||
// DAGSetup is used to create a new db and DAG instance with the genesis
|
||||
// block already inserted. In addition to the new DAG instance, it returns
|
||||
// a teardown function the caller should invoke when done testing to clean up.
|
||||
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
||||
if !isSupportedDbType(testDbType) {
|
||||
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
|
||||
}
|
||||
|
||||
// The openDB parameter instructs DAGSetup whether or not to also open the
|
||||
// database. Setting it to false is useful in tests that handle database
|
||||
// opening/closing by themselves.
|
||||
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
|
||||
var teardown func()
|
||||
|
||||
// To make sure that the teardown function is not called before all goroutines have finished running -
|
||||
// overwrite `spawn` to count the number of running goroutines
|
||||
spawnWaitGroup := sync.WaitGroup{}
|
||||
realSpawn := spawn
|
||||
spawn = func(f func()) {
|
||||
spawn = func(name string, f func()) {
|
||||
spawnWaitGroup.Add(1)
|
||||
realSpawn(func() {
|
||||
realSpawn(name, func() {
|
||||
f()
|
||||
spawnWaitGroup.Done()
|
||||
})
|
||||
}
|
||||
|
||||
if config.DB == nil {
|
||||
tmpDir := os.TempDir()
|
||||
if openDb {
|
||||
var err error
|
||||
tmpDir, err := ioutil.TempDir("", "DAGSetup")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
|
||||
}
|
||||
|
||||
// We set ldb.Options here to return nil because normally
|
||||
// the database is initialized with very large caches that
|
||||
// can make opening/closing the database for every test
|
||||
// quite heavy.
|
||||
originalLDBOptions := ldb.Options
|
||||
ldb.Options = func() *opt.Options {
|
||||
return nil
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(tmpDir, dbName)
|
||||
_ = os.RemoveAll(dbPath)
|
||||
var err error
|
||||
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
|
||||
databaseContext, err := dbaccess.New(dbPath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
config.DatabaseContext = databaseContext
|
||||
|
||||
// Setup a teardown function for cleaning up. This function is
|
||||
// returned to the caller to be invoked when it is done testing.
|
||||
teardown = func() {
|
||||
spawnWaitGroup.Wait()
|
||||
spawn = realSpawn
|
||||
config.DB.Close()
|
||||
databaseContext.Close()
|
||||
ldb.Options = originalLDBOptions
|
||||
os.RemoveAll(dbPath)
|
||||
}
|
||||
} else {
|
||||
teardown = func() {
|
||||
spawnWaitGroup.Wait()
|
||||
spawn = realSpawn
|
||||
config.DB.Close()
|
||||
}
|
||||
}
|
||||
|
||||
config.TimeSource = NewMedianTime()
|
||||
config.TimeSource = NewTimeSource()
|
||||
config.SigCache = txscript.NewSigCache(1000)
|
||||
|
||||
// Create the DAG instance.
|
||||
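For reference, this mirrors how the tests earlier in this diff call the new DAGSetup signature; the test name here is arbitrary:

// Typical test-side usage of the new DAGSetup signature (openDb = true).
func TestExampleUsage(t *testing.T) {
	dag, teardownFunc, err := DAGSetup("TestExampleUsage", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()
	_ = dag
}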
@@ -125,30 +121,30 @@ type txSubnetworkData struct {
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
|
||||
txIns := []*wire.TxIn{}
|
||||
txOuts := []*wire.TxOut{}
|
||||
func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *domainmessage.MsgTx {
|
||||
txIns := []*domainmessage.TxIn{}
|
||||
txOuts := []*domainmessage.TxOut{}
|
||||
|
||||
for i := uint32(0); i < numInputs; i++ {
|
||||
txIns = append(txIns, &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
|
||||
txIns = append(txIns, &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(&daghash.TxID{}, i),
|
||||
SignatureScript: []byte{},
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
})
|
||||
}
|
||||
|
||||
for i := uint32(0); i < numOutputs; i++ {
|
||||
txOuts = append(txOuts, &wire.TxOut{
|
||||
txOuts = append(txOuts, &domainmessage.TxOut{
|
||||
ScriptPubKey: OpTrueScript,
|
||||
Value: outputValue,
|
||||
})
|
||||
}
|
||||
|
||||
if subnetworkData != nil {
|
||||
return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
|
||||
return domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
|
||||
}
|
||||
|
||||
return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
|
||||
return domainmessage.NewNativeMsgTx(domainmessage.TxVersion, txIns, txOuts)
|
||||
}
|
||||
|
||||
// VirtualForTest is an exported version for virtualBlock, so that it can be returned by exported test_util methods
|
||||
@@ -165,15 +161,15 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
|
||||
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
|
||||
parents := newBlockSet()
|
||||
for _, hash := range parentHashes {
|
||||
parent := dag.index.LookupNode(hash)
|
||||
if parent == nil {
|
||||
parent, ok := dag.index.LookupNode(hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
|
||||
}
|
||||
parents.add(parent)
|
||||
}
|
||||
virtual := newVirtualBlock(dag, parents)
|
||||
|
||||
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
|
||||
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -190,7 +186,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
|
||||
// LoadBlocks reads files containing kaspa gzipped block data from disk
|
||||
// and returns them as an array of util.Block.
|
||||
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
|
||||
var network = wire.Mainnet
|
||||
var network = domainmessage.Mainnet
|
||||
var dr io.Reader
|
||||
var fi io.ReadCloser
|
||||
|
||||
@@ -248,7 +244,7 @@ func opTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
|
||||
}
|
||||
|
||||
// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
|
||||
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) (*wire.MsgBlock, error) {
|
||||
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*domainmessage.MsgTx) (*domainmessage.MsgBlock, error) {
|
||||
newVirtual, err := GetVirtualFromParentsForTest(dag, parentHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -256,7 +252,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
oldVirtual := SetVirtualForTest(dag, newVirtual)
|
||||
defer SetVirtualForTest(dag, oldVirtual)
|
||||
|
||||
OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix)
|
||||
OpTrueAddr, err := opTrueAddress(dag.Params.Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -299,6 +295,28 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// PrepareAndProcessBlockForTest prepares a block that points to the given parent
|
||||
// hashes and processes it.
|
||||
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*domainmessage.MsgTx) *domainmessage.MsgBlock {
|
||||
daghash.Sort(parentHashes)
|
||||
block, err := PrepareBlockForTest(dag, parentHashes, transactions)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in ProcessBlock: %s", err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("block is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("block was unexpectedly orphan")
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
|
||||
func generateDeterministicExtraNonceForTest() uint64 {
|
||||
extraNonceForTest++
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsSupportedDbType(t *testing.T) {
|
||||
if !isSupportedDbType("ffldb") {
|
||||
t.Errorf("ffldb should be a supported DB driver")
|
||||
}
|
||||
if isSupportedDbType("madeUpDb") {
|
||||
t.Errorf("madeUpDb should not be a supported DB driver")
|
||||
}
|
||||
}
|
||||
BIN blockdag/testdata/blk_0_to_4.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3A.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3B.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3C.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3D.dat (vendored, binary file not shown)
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ThresholdState define the various threshold states used when voting on
|
||||
@@ -156,7 +157,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
|
||||
|
||||
// The state is simply defined if the start time hasn't been
|
||||
// been reached yet.
|
||||
if uint64(medianTime.Unix()) < checker.BeginTime() {
|
||||
if uint64(medianTime.UnixMilliseconds()) < checker.BeginTime() {
|
||||
cache.Update(prevNode.hash, ThresholdDefined)
|
||||
break
|
||||
}
|
||||
@@ -177,9 +178,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
|
||||
var ok bool
|
||||
state, ok = cache.Lookup(prevNode.hash)
|
||||
if !ok {
|
||||
return ThresholdFailed, AssertError(fmt.Sprintf(
|
||||
return ThresholdFailed, errors.Errorf(
|
||||
"thresholdState: cache lookup failed for %s",
|
||||
prevNode.hash))
|
||||
prevNode.hash)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -193,7 +194,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
|
||||
// The deployment of the rule change fails if it expires
|
||||
// before it is accepted and locked in.
|
||||
medianTime := prevNode.PastMedianTime(dag)
|
||||
medianTimeUnix := uint64(medianTime.Unix())
|
||||
medianTimeUnix := uint64(medianTime.UnixMilliseconds())
|
||||
if medianTimeUnix >= checker.EndTime() {
|
||||
state = ThresholdFailed
|
||||
break
|
||||
@@ -210,7 +211,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
|
||||
// The deployment of the rule change fails if it expires
|
||||
// before it is accepted and locked in.
|
||||
medianTime := prevNode.PastMedianTime(dag)
|
||||
if uint64(medianTime.Unix()) >= checker.EndTime() {
|
||||
if uint64(medianTime.UnixMilliseconds()) >= checker.EndTime() {
|
||||
state = ThresholdFailed
|
||||
break
|
||||
}
|
||||
@@ -296,11 +297,11 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
|
||||
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
|
||||
return ThresholdFailed, DeploymentError(deploymentID)
|
||||
if deploymentID > uint32(len(dag.Params.Deployments)) {
|
||||
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
|
||||
}
|
||||
|
||||
deployment := &dag.dagParams.Deployments[deploymentID]
|
||||
deployment := &dag.Params.Deployments[deploymentID]
|
||||
checker := deploymentChecker{deployment: deployment, dag: dag}
|
||||
cache := &dag.deploymentCaches[deploymentID]
|
||||
|
||||
@@ -324,8 +325,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for id := 0; id < len(dag.dagParams.Deployments); id++ {
|
||||
deployment := &dag.dagParams.Deployments[id]
|
||||
for id := 0; id < len(dag.Params.Deployments); id++ {
|
||||
deployment := &dag.Params.Deployments[id]
|
||||
cache := &dag.deploymentCaches[id]
|
||||
checker := deploymentChecker{deployment: deployment, dag: dag}
|
||||
_, err := dag.thresholdState(prevNode, checker, cache)
|
||||
@@ -335,8 +336,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
|
||||
}
|
||||
|
||||
// No warnings about unknown rules or versions until the DAG is
|
||||
// current.
|
||||
if dag.isCurrent() {
|
||||
// synced.
|
||||
if dag.isSynced() {
|
||||
// Warn if a high enough percentage of the last blocks have
|
||||
// unexpected versions.
|
||||
bestNode := dag.selectedTip()
|
||||
|
||||
25
blockdag/timesource.go
Normal file
25
blockdag/timesource.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
)
|
||||
|
||||
// TimeSource is the interface to access time.
|
||||
type TimeSource interface {
|
||||
// Now returns the current time.
|
||||
Now() mstime.Time
|
||||
}
|
||||
|
||||
// timeSource provides an implementation of the TimeSource interface
|
||||
// that simply returns the current local time.
|
||||
type timeSource struct{}
|
||||
|
||||
// Now returns the current local time, with one millisecond precision.
|
||||
func (m *timeSource) Now() mstime.Time {
|
||||
return mstime.Now()
|
||||
}
|
||||
|
||||
// NewTimeSource returns a new instance of a TimeSource
|
||||
func NewTimeSource() TimeSource {
|
||||
return &timeSource{}
|
||||
}
|
||||
@@ -2,31 +2,26 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/kaspanet/kaspad/ecc"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
)
|
||||
|
||||
const ecmhCacheSize = 4_000_000
|
||||
|
||||
var (
|
||||
utxoToECMHCache = lru.New(ecmhCacheSize)
|
||||
)
|
||||
|
||||
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
|
||||
func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *domainmessage.Outpoint) (*secp256k1.MultiSet, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := serializeUTXO(w, entry, outpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serializedUTXO := w.Bytes()
|
||||
utxoHash := daghash.DoubleHashH(serializedUTXO)
|
||||
|
||||
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
|
||||
return cachedMSPoint.(*ecc.Multiset), nil
|
||||
}
|
||||
msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
|
||||
utxoToECMHCache.Add(utxoHash, msPoint)
|
||||
return msPoint, nil
|
||||
ms.Add(w.Bytes())
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *domainmessage.Outpoint) (*secp256k1.MultiSet, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := serializeUTXO(w, entry, outpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ms.Remove(w.Bytes())
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
@@ -2,14 +2,12 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/locks"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var multisetPointSize = 32
|
||||
|
||||
type blockUTXODiffData struct {
|
||||
diff *UTXODiff
|
||||
diffChild *blockNode
|
||||
@@ -17,16 +15,16 @@ type blockUTXODiffData struct {
|
||||
|
||||
type utxoDiffStore struct {
|
||||
dag *BlockDAG
|
||||
dirty map[daghash.Hash]struct{}
|
||||
loaded map[daghash.Hash]*blockUTXODiffData
|
||||
dirty map[*blockNode]struct{}
|
||||
loaded map[*blockNode]*blockUTXODiffData
|
||||
mtx *locks.PriorityMutex
|
||||
}
|
||||
|
||||
func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
|
||||
return &utxoDiffStore{
|
||||
dag: dag,
|
||||
dirty: make(map[daghash.Hash]struct{}),
|
||||
loaded: make(map[daghash.Hash]*blockUTXODiffData),
|
||||
dirty: make(map[*blockNode]struct{}),
|
||||
loaded: make(map[*blockNode]*blockUTXODiffData),
|
||||
mtx: locks.NewPriorityMutex(),
|
||||
}
|
||||
}
|
||||
@@ -35,16 +33,15 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
// load the diff data from DB to diffStore.loaded
|
||||
_, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
if err != nil {
|
||||
_, err := diffStore.diffDataByBlockNode(node)
|
||||
if dbaccess.IsNotFoundError(err) {
|
||||
diffStore.loaded[node] = &blockUTXODiffData{}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
|
||||
}
|
||||
|
||||
diffStore.loaded[*node.hash].diff = diff
|
||||
diffStore.setBlockAsDirty(node.hash)
|
||||
diffStore.loaded[node].diff = diff
|
||||
diffStore.setBlockAsDirty(node)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -52,22 +49,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
// load the diff data from DB to diffStore.loaded
|
||||
_, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
_, err := diffStore.diffDataByBlockNode(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return diffNotFoundError(node)
|
||||
}
|
||||
|
||||
diffStore.loaded[*node.hash].diffChild = diffChild
|
||||
diffStore.setBlockAsDirty(node.hash)
|
||||
diffStore.loaded[node].diffChild = diffChild
|
||||
diffStore.setBlockAsDirty(node)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
|
||||
for _, hash := range blockHashes {
|
||||
err := diffStore.removeBlockDiffData(dbTx, hash)
|
||||
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error {
|
||||
for _, node := range nodes {
|
||||
err := diffStore.removeBlockDiffData(dbContext, node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -75,87 +69,64 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
|
||||
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error {
|
||||
diffStore.mtx.LowPriorityWriteLock()
|
||||
defer diffStore.mtx.LowPriorityWriteUnlock()
|
||||
delete(diffStore.loaded, *blockHash)
|
||||
err := dbRemoveDiffData(dbTx, blockHash)
|
||||
delete(diffStore.loaded, node)
|
||||
err := dbaccess.RemoveDiffData(dbContext, node.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
|
||||
diffStore.dirty[*blockHash] = struct{}{}
|
||||
func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) {
|
||||
diffStore.dirty[node] = struct{}{}
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
|
||||
if diffData, ok := diffStore.loaded[*hash]; ok {
|
||||
return diffData, true, nil
|
||||
func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) {
|
||||
if diffData, ok := diffStore.loaded[node]; ok {
|
||||
return diffData, nil
|
||||
}
|
||||
diffData, err := diffStore.diffDataFromDB(hash)
|
||||
diffData, err := diffStore.diffDataFromDB(node.hash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
exists := diffData != nil
|
||||
if exists {
|
||||
diffStore.loaded[*hash] = diffData
|
||||
}
|
||||
return diffData, exists, nil
|
||||
}
|
||||
|
||||
func diffNotFoundError(node *blockNode) error {
|
||||
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
|
||||
diffStore.loaded[node] = diffData
|
||||
return diffData, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
|
||||
diffStore.mtx.HighPriorityReadLock()
|
||||
defer diffStore.mtx.HighPriorityReadUnlock()
|
||||
diffData, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
diffData, err := diffStore.diffDataByBlockNode(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, diffNotFoundError(node)
|
||||
}
|
||||
return diffData.diff, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
|
||||
diffStore.mtx.HighPriorityReadLock()
|
||||
defer diffStore.mtx.HighPriorityReadUnlock()
|
||||
diffData, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
diffData, err := diffStore.diffDataByBlockNode(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, diffNotFoundError(node)
|
||||
}
|
||||
return diffData.diffChild, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
|
||||
var diffData *blockUTXODiffData
|
||||
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
|
||||
serializedBlockDiffData := bucket.Get(hash[:])
|
||||
if serializedBlockDiffData != nil {
|
||||
var err error
|
||||
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(diffStore.dag.databaseContext, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return diffData, nil
|
||||
|
||||
return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty diff data to the database. If all writes
|
||||
// succeed, this clears the dirty set.
|
||||
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
|
||||
// flushToDB writes all dirty diff data to the database.
|
||||
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
if len(diffStore.dirty) == 0 {
|
||||
@@ -165,11 +136,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
|
||||
// Allocate a buffer here to avoid needless allocations/grows
|
||||
// while writing each entry.
|
||||
buffer := &bytes.Buffer{}
|
||||
for hash := range diffStore.dirty {
|
||||
hash := hash // Copy hash to a new variable to avoid passing the same pointer
|
||||
for node := range diffStore.dirty {
|
||||
buffer.Reset()
|
||||
diffData := diffStore.loaded[hash]
|
||||
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
|
||||
diffData := diffStore.loaded[node]
|
||||
err := storeDiffData(dbContext, buffer, node.hash, diffData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -178,31 +148,53 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) clearDirtyEntries() {
|
||||
diffStore.dirty = make(map[daghash.Hash]struct{})
|
||||
diffStore.dirty = make(map[*blockNode]struct{})
|
||||
}
|
||||
|
||||
// dbStoreDiffData stores the UTXO diff data to the database.
|
||||
// maxBlueScoreDifferenceToKeepLoaded is the maximum difference
|
||||
// between the virtual's blueScore and a blockNode's blueScore
|
||||
// under which to keep diff data loaded in memory.
|
||||
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100
|
||||
|
||||
// clearOldEntries removes entries whose blue score is lower than
|
||||
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note
|
||||
// that tips are not removed either even if their blue score is
|
||||
// lower than the above.
|
||||
func (diffStore *utxoDiffStore) clearOldEntries() {
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
|
||||
virtualBlueScore := diffStore.dag.VirtualBlueScore()
|
||||
minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
|
||||
if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore {
|
||||
minBlueScore = 0
|
||||
}
|
||||
|
||||
tips := diffStore.dag.virtual.tips()
|
||||
|
||||
toRemove := make(map[*blockNode]struct{})
|
||||
for node := range diffStore.loaded {
|
||||
if node.blueScore < minBlueScore && !tips.contains(node) {
|
||||
toRemove[node] = struct{}{}
|
||||
}
|
||||
}
|
||||
for node := range toRemove {
|
||||
delete(diffStore.loaded, node)
|
||||
}
|
||||
}
|
||||
|
||||
// storeDiffData stores the UTXO diff data to the database.
|
||||
// This overwrites the current entry if there exists one.
|
||||
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
|
||||
// To avoid a ton of allocs, use the given writeBuffer
|
||||
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
|
||||
// To avoid a ton of allocs, use the io.Writer
|
||||
// instead of allocating one. We expect the buffer to
|
||||
// already be initalized and, in most cases, to already
|
||||
// already be initialized and, in most cases, to already
|
||||
// be large enough to accommodate the serialized data
|
||||
// without growing.
|
||||
err := serializeBlockUTXODiffData(writeBuffer, diffData)
|
||||
err := serializeBlockUTXODiffData(w, diffData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Bucket.Put doesn't copy on its own, so we manually
|
||||
// copy here. We do so because we expect the buffer
|
||||
// to be reused once we're done with it.
|
||||
serializedDiffData := make([]byte, writeBuffer.Len())
|
||||
copy(serializedDiffData, writeBuffer.Bytes())
|
||||
|
||||
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
|
||||
}
|
||||
|
||||
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
|
||||
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
|
||||
return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
|
||||
}
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
func TestUTXODiffStore(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -31,16 +31,19 @@ func TestUTXODiffStore(t *testing.T) {
|
||||
// Check that an error is returned when asking for non existing node
|
||||
nonExistingNode := createNode()
|
||||
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
|
||||
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
|
||||
if err == nil || err.Error() != expectedErrString {
|
||||
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
|
||||
if !dbaccess.IsNotFoundError(err) {
|
||||
if err != nil {
|
||||
t.Errorf("diffByNode: %s", err)
|
||||
} else {
|
||||
t.Errorf("diffByNode: unexpectedly found diff data")
|
||||
}
|
||||
}
|
||||
|
||||
// Add node's diff data to the utxoDiffStore and check if it's checked correctly.
|
||||
node := createNode()
|
||||
diff := NewUTXODiff()
|
||||
diff.toAdd.add(wire.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}})
|
||||
diff.toRemove.add(wire.Outpoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, scriptPubKey: []byte{0x02}})
|
||||
diff.toAdd.add(domainmessage.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}})
|
||||
diff.toRemove.add(domainmessage.Outpoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, scriptPubKey: []byte{0x02}})
|
||||
if err := dag.utxoDiffStore.setBlockDiff(node, diff); err != nil {
|
||||
t.Fatalf("setBlockDiff: unexpected error: %s", err)
|
||||
}
|
||||
@@ -63,13 +66,20 @@ func TestUTXODiffStore(t *testing.T) {
|
||||
|
||||
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
|
||||
// map, and check if the diff data is re-fetched from the database.
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
return dag.utxoDiffStore.flushToDB(dbTx)
|
||||
})
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
|
||||
}
|
||||
delete(dag.utxoDiffStore.loaded, *node.hash)
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database transaction: %s", err)
|
||||
}
|
||||
delete(dag.utxoDiffStore.loaded, node)
|
||||
|
||||
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
|
||||
t.Fatalf("diffByNode: unexpected error: %s", err)
|
||||
@@ -78,9 +88,80 @@ func TestUTXODiffStore(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded
|
||||
if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
|
||||
if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok {
|
||||
t.Errorf("the diff data wasn't added to loaded map after requesting it")
|
||||
} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
|
||||
t.Errorf("Expected diff and loadedDiff to be equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearOldEntries(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run
|
||||
currentDifference := maxBlueScoreDifferenceToKeepLoaded
|
||||
maxBlueScoreDifferenceToKeepLoaded = 10
|
||||
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
|
||||
|
||||
// Add 10 blocks
|
||||
blockNodes := make([]*blockNode, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
|
||||
|
||||
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
|
||||
}
|
||||
blockNodes[i] = node
|
||||
}
|
||||
|
||||
// Make sure that all of them exist in the loaded set
|
||||
for _, node := range blockNodes {
|
||||
_, ok := dag.utxoDiffStore.loaded[node]
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Add 10 more blocks on top of the others
|
||||
for i := 0; i < 10; i++ {
|
||||
PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
|
||||
}
|
||||
|
||||
// Make sure that all the old nodes no longer exist in the loaded set
|
||||
for _, node := range blockNodes {
|
||||
_, ok := dag.utxoDiffStore.loaded[node]
|
||||
if ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Add a block on top of the genesis to force the retrieval of all diffData
|
||||
processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
|
||||
}
|
||||
|
||||
// Make sure that the child-of-genesis node is in the loaded set, since it
|
||||
// is a tip.
|
||||
_, ok = dag.utxoDiffStore.loaded[node]
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
|
||||
}
|
||||
|
||||
// Make sure that all the old nodes still do not exist in the loaded set
|
||||
for _, node := range blockNodes {
|
||||
_, ok := dag.utxoDiffStore.loaded[node]
|
||||
if ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,15 +2,11 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util/binaryserializer"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/kaspanet/kaspad/ecc"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// serializeBlockUTXODiffData serializes diff data in the following format:
|
||||
@@ -21,12 +17,12 @@ import (
|
||||
// diff | UTXODiff | The diff data's diff
|
||||
func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error {
|
||||
hasDiffChild := diffData.diffChild != nil
|
||||
err := wire.WriteElement(w, hasDiffChild)
|
||||
err := domainmessage.WriteElement(w, hasDiffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hasDiffChild {
|
||||
err := wire.WriteElement(w, diffData.diffChild.hash)
|
||||
err := domainmessage.WriteElement(w, diffData.diffChild.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -40,54 +36,31 @@ func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// utxoEntryHeaderCode returns the calculated header code to be used when
|
||||
// serializing the provided utxo entry.
|
||||
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
|
||||
// As described in the serialization format comments, the header code
|
||||
// encodes the blue score shifted over one bit and the block reward flag
|
||||
// in the lowest bit.
|
||||
headerCode := uint64(entry.BlockBlueScore()) << 1
|
||||
if entry.IsCoinbase() {
|
||||
headerCode |= 0x01
|
||||
}
|
||||
|
||||
return headerCode
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
|
||||
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
|
||||
diffData := &blockUTXODiffData{}
|
||||
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
|
||||
r := bytes.NewBuffer(serializedDiffData)
|
||||
|
||||
var hasDiffChild bool
|
||||
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
|
||||
err := domainmessage.ReadElement(r, &hasDiffChild)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if hasDiffChild {
|
||||
hash := &daghash.Hash{}
|
||||
err := wire.ReadElement(serializedDiffData, hash)
|
||||
err := domainmessage.ReadElement(r, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
|
||||
|
||||
var ok bool
|
||||
diffData.diffChild, ok = diffStore.dag.index.LookupNode(hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("block %s does not exist in the DAG", hash)
|
||||
}
|
||||
}
|
||||
|
||||
diffData.diff = &UTXODiff{
|
||||
useMultiset: true,
|
||||
}
|
||||
|
||||
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
|
||||
diffData.diff, err = deserializeUTXODiff(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -95,38 +68,31 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataB
|
||||
return diffData, nil
|
||||
}
|
||||
|
||||
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
|
||||
count, err := wire.ReadVarInt(r)
|
||||
func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
|
||||
diff := &UTXODiff{}
|
||||
|
||||
var err error
|
||||
diff.toAdd, err = deserializeUTXOCollection(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diff.toRemove, err = deserializeUTXOCollection(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
|
||||
count, err := domainmessage.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collection := utxoCollection{}
|
||||
for i := uint64(0); i < count; i++ {
|
||||
outpointSize, err := wire.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serializedOutpoint := make([]byte, outpointSize)
|
||||
err = binary.Read(r, byteOrder, serializedOutpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outpoint, err := deserializeOutpoint(serializedOutpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
utxoEntrySize, err := wire.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serializedEntry := make([]byte, utxoEntrySize)
|
||||
err = binary.Read(r, byteOrder, serializedEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
|
||||
utxoEntry, outpoint, err := deserializeUTXO(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -135,31 +101,22 @@ func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
|
||||
return collection, nil
|
||||
}
|
||||
|
||||
// deserializeMultiset deserializes an EMCH multiset.
|
||||
// See serializeMultiset for more details.
|
||||
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
|
||||
xBytes := make([]byte, multisetPointSize)
|
||||
yBytes := make([]byte, multisetPointSize)
|
||||
err := binary.Read(r, byteOrder, xBytes)
|
||||
func deserializeUTXO(r io.Reader) (*UTXOEntry, *domainmessage.Outpoint, error) {
|
||||
outpoint, err := deserializeOutpoint(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
err = binary.Read(r, byteOrder, yBytes)
|
||||
|
||||
utxoEntry, err := deserializeUTXOEntry(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
var x, y big.Int
|
||||
x.SetBytes(xBytes)
|
||||
y.SetBytes(yBytes)
|
||||
return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
|
||||
return utxoEntry, outpoint, nil
|
||||
}
|
||||
|
||||
// serializeUTXODiff serializes UTXODiff by serializing
|
||||
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
|
||||
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
|
||||
if !diff.useMultiset {
|
||||
return errors.New("Cannot serialize a UTXO diff without a multiset")
|
||||
}
|
||||
err := serializeUTXOCollection(w, diff.toAdd)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -169,10 +126,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = serializeMultiset(w, diff.diffMultiset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -180,7 +134,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
|
||||
// the utxo entries and serializing them and their corresponding outpoint
|
||||
// prefixed by a varint that indicates their size.
|
||||
func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
|
||||
err := wire.WriteVarInt(w, uint64(len(collection)))
|
||||
err := domainmessage.WriteVarInt(w, uint64(len(collection)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -193,120 +147,93 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// serializeMultiset serializes an ECMH multiset. The serialization
|
||||
// is done by taking the (x,y) coordinnates of the multiset point and
|
||||
// padding each one of them with 32 byte (it'll be 32 byte in most
|
||||
// cases anyway except one of the coordinates is zero) and writing
|
||||
// them one after the other.
|
||||
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
|
||||
x, y := ms.Point()
|
||||
xBytes := make([]byte, multisetPointSize)
|
||||
copy(xBytes, x.Bytes())
|
||||
yBytes := make([]byte, multisetPointSize)
|
||||
copy(yBytes, y.Bytes())
|
||||
|
||||
err := binary.Write(w, byteOrder, xBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(w, byteOrder, yBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// serializeUTXO serializes a utxo entry-outpoint pair
|
||||
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
|
||||
serializedOutpoint := *outpointKey(*outpoint)
|
||||
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
|
||||
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *domainmessage.Outpoint) error {
|
||||
err := serializeOutpoint(w, outpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = binary.Write(w, byteOrder, serializedOutpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serializedUTXOEntry := serializeUTXOEntry(entry)
|
||||
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(w, byteOrder, serializedUTXOEntry)
|
||||
err = serializeUTXOEntry(w, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// serializeUTXOEntry returns the entry serialized to a format that is suitable
|
||||
// for long-term storage. The format is described in detail above.
|
||||
func serializeUTXOEntry(entry *UTXOEntry) []byte {
|
||||
// Encode the header code.
|
||||
headerCode := utxoEntryHeaderCode(entry)
|
||||
// p2pkhUTXOEntrySerializeSize is the serialized size for a P2PKH UTXO entry.
|
||||
// 8 bytes (header code) + 8 bytes (amount) + varint for script pub key length of 25 (for P2PKH) + 25 bytes for P2PKH script.
|
||||
var p2pkhUTXOEntrySerializeSize = 8 + 8 + domainmessage.VarIntSerializeSize(25) + 25
|
||||
|
||||
// Calculate the size needed to serialize the entry.
|
||||
size := serializeSizeVLQ(headerCode) +
|
||||
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
|
||||
|
||||
// Serialize the header code followed by the compressed unspent
|
||||
// transaction output.
|
||||
serialized := make([]byte, size)
|
||||
offset := putVLQ(serialized, headerCode)
|
||||
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
|
||||
entry.ScriptPubKey())
|
||||
|
||||
return serialized
|
||||
}
|
||||
|
||||
// deserializeOutpoint decodes an outpoint from the passed serialized byte
|
||||
// slice into a new wire.Outpoint using a format that is suitable for long-
|
||||
// term storage. this format is described in detail above.
|
||||
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
|
||||
if len(serialized) <= daghash.HashSize {
|
||||
return nil, errDeserialize("unexpected end of data")
|
||||
}
|
||||
|
||||
txID := daghash.TxID{}
|
||||
txID.SetBytes(serialized[:daghash.HashSize])
|
||||
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
|
||||
return wire.NewOutpoint(&txID, uint32(index)), nil
|
||||
}
|
||||
|
||||
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
|
||||
// slice into a new UTXOEntry using a format that is suitable for long-term
|
||||
// storage. The format is described in detail above.
|
||||
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
|
||||
// Deserialize the header code.
|
||||
code, offset := deserializeVLQ(serialized)
|
||||
if offset >= len(serialized) {
|
||||
return nil, errDeserialize("unexpected end of data after header")
|
||||
}
|
||||
|
||||
// Decode the header code.
|
||||
//
|
||||
// Bit 0 indicates whether the containing transaction is a coinbase.
|
||||
// Bits 1-x encode blue score of the containing transaction.
|
||||
isCoinbase := code&0x01 != 0
|
||||
blockBlueScore := code >> 1
|
||||
|
||||
// Decode the compressed unspent transaction output.
|
||||
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
|
||||
// serializeUTXOEntry encodes the entry to the given io.Writer and use compression if useCompression is true.
|
||||
// The compression format is described in detail above.
|
||||
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
|
||||
// Encode the blueScore.
|
||||
err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
|
||||
if err != nil {
|
||||
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
|
||||
"UTXO: %s", err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Encode the packedFlags.
|
||||
err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = domainmessage.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(entry.ScriptPubKey())
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deserializeUTXOEntry decodes a UTXO entry from the passed reader
|
||||
// into a new UTXOEntry. If isCompressed is used it will decompress
|
||||
// the entry according to the format that is described in detail
|
||||
// above.
|
||||
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
|
||||
// Deserialize the blueScore.
|
||||
blockBlueScore, err := binaryserializer.Uint64(r, byteOrder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Decode the packedFlags.
|
||||
packedFlags, err := binaryserializer.Uint8(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry := &UTXOEntry{
|
||||
amount: amount,
|
||||
scriptPubKey: scriptPubKey,
|
||||
blockBlueScore: blockBlueScore,
|
||||
packedFlags: 0,
|
||||
packedFlags: txoFlags(packedFlags),
|
||||
}
|
||||
if isCoinbase {
|
||||
entry.packedFlags |= tfCoinbase
|
||||
|
||||
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scriptPubKeyLen, err := domainmessage.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry.scriptPubKey = make([]byte, scriptPubKeyLen)
|
||||
_, err = r.Read(entry.scriptPubKey)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/ecc"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -78,7 +78,7 @@ const (
|
||||
)
|
||||
|
||||
// NewUTXOEntry creates a new utxoEntry representing the given txOut
|
||||
func NewUTXOEntry(txOut *wire.TxOut, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
|
||||
func NewUTXOEntry(txOut *domainmessage.TxOut, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
|
||||
entry := &UTXOEntry{
|
||||
amount: txOut.Value,
|
||||
scriptPubKey: txOut.ScriptPubKey,
|
||||
@@ -93,7 +93,7 @@ func NewUTXOEntry(txOut *wire.TxOut, isCoinbase bool, blockBlueScore uint64) *UT
|
||||
}
|
||||
|
||||
// utxoCollection represents a set of UTXOs indexed by their outpoints
|
||||
type utxoCollection map[wire.Outpoint]*UTXOEntry
|
||||
type utxoCollection map[domainmessage.Outpoint]*UTXOEntry
|
||||
|
||||
func (uc utxoCollection) String() string {
|
||||
utxoStrings := make([]string, len(uc))
|
||||
@@ -112,31 +112,31 @@ func (uc utxoCollection) String() string {
|
||||
}
|
||||
|
||||
// add adds a new UTXO entry to this collection
|
||||
func (uc utxoCollection) add(outpoint wire.Outpoint, entry *UTXOEntry) {
|
||||
func (uc utxoCollection) add(outpoint domainmessage.Outpoint, entry *UTXOEntry) {
|
||||
uc[outpoint] = entry
|
||||
}
|
||||
|
||||
// remove removes a UTXO entry from this collection if it exists
|
||||
func (uc utxoCollection) remove(outpoint wire.Outpoint) {
|
||||
func (uc utxoCollection) remove(outpoint domainmessage.Outpoint) {
|
||||
delete(uc, outpoint)
|
||||
}
|
||||
|
||||
// get returns the UTXOEntry represented by provided outpoint,
|
||||
// and a boolean value indicating if said UTXOEntry is in the set or not
|
||||
func (uc utxoCollection) get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
|
||||
func (uc utxoCollection) get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
|
||||
entry, ok := uc[outpoint]
|
||||
return entry, ok
|
||||
}
|
||||
|
||||
// contains returns a boolean value indicating whether a UTXO entry is in the set
|
||||
func (uc utxoCollection) contains(outpoint wire.Outpoint) bool {
|
||||
func (uc utxoCollection) contains(outpoint domainmessage.Outpoint) bool {
|
||||
_, ok := uc[outpoint]
|
||||
return ok
|
||||
}
|
||||
|
||||
// containsWithBlueScore returns a boolean value indicating whether a UTXOEntry
|
||||
// is in the set and its blue score is equal to the given blue score.
|
||||
func (uc utxoCollection) containsWithBlueScore(outpoint wire.Outpoint, blueScore uint64) bool {
|
||||
func (uc utxoCollection) containsWithBlueScore(outpoint domainmessage.Outpoint, blueScore uint64) bool {
|
||||
entry, ok := uc.get(outpoint)
|
||||
return ok && entry.blockBlueScore == blueScore
|
||||
}
|
||||
@@ -153,29 +153,16 @@ func (uc utxoCollection) clone() utxoCollection {
|
||||
|
||||
// UTXODiff represents a diff between two UTXO Sets.
|
||||
type UTXODiff struct {
|
||||
toAdd utxoCollection
|
||||
toRemove utxoCollection
|
||||
diffMultiset *ecc.Multiset
|
||||
useMultiset bool
|
||||
toAdd utxoCollection
|
||||
toRemove utxoCollection
|
||||
}
|
||||
|
||||
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
|
||||
// NewUTXODiff creates a new, empty utxoDiff
|
||||
// without a multiset.
|
||||
func NewUTXODiffWithoutMultiset() *UTXODiff {
|
||||
return &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
useMultiset: false,
|
||||
}
|
||||
}
|
||||
|
||||
// NewUTXODiff creates a new, empty utxoDiff.
|
||||
func NewUTXODiff() *UTXODiff {
|
||||
return &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
useMultiset: true,
|
||||
diffMultiset: ecc.NewMultiset(ecc.S256()),
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,9 +196,8 @@ func NewUTXODiff() *UTXODiff {
|
||||
// diffFrom results in the UTXO being added to toAdd
|
||||
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
|
||||
result := UTXODiff{
|
||||
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
|
||||
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
|
||||
useMultiset: d.useMultiset,
|
||||
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
|
||||
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
|
||||
}
|
||||
|
||||
// Note that the following cases are not accounted for, as they are impossible
|
||||
@@ -293,17 +279,12 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if d.useMultiset {
|
||||
// Create a new diffMultiset as the subtraction of the two diffs.
|
||||
result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// WithDiffInPlace applies provided diff to this diff in-place, that would be the result if
|
||||
// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
|
||||
// first d, and than diff were applied to the same base
|
||||
func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
|
||||
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
|
||||
for outpoint, entryToRemove := range diff.toRemove {
|
||||
if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
|
||||
// If already exists in toAdd with the same blueScore - remove from toAdd
|
||||
@@ -312,8 +293,8 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
|
||||
}
|
||||
if d.toRemove.contains(outpoint) {
|
||||
// If already exists - this is an error
|
||||
return ruleError(ErrWithDiff, fmt.Sprintf(
|
||||
"WithDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
|
||||
return errors.Errorf(
|
||||
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)
|
||||
}
|
||||
|
||||
// If not exists neither in toAdd nor in toRemove - add to toRemove
|
||||
@@ -324,9 +305,9 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
|
||||
if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
|
||||
// If already exists in toRemove with the same blueScore - remove from toRemove
|
||||
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
|
||||
return ruleError(ErrWithDiff, fmt.Sprintf(
|
||||
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
|
||||
"corresponding entry in diff.toRemove", outpoint))
|
||||
return errors.Errorf(
|
||||
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
|
||||
"corresponding entry in diff.toRemove", outpoint)
|
||||
}
|
||||
d.toRemove.remove(outpoint)
|
||||
continue
|
||||
@@ -335,130 +316,35 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
|
||||
(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
|
||||
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
|
||||
// If already exists - this is an error
|
||||
return ruleError(ErrWithDiff, fmt.Sprintf(
|
||||
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
|
||||
return errors.Errorf(
|
||||
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)
|
||||
}
|
||||
|
||||
// If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd
|
||||
d.toAdd.add(outpoint, entryToAdd)
|
||||
}
|
||||
|
||||
// Apply diff.diffMultiset to d.diffMultiset
|
||||
if d.useMultiset {
|
||||
d.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
|
||||
// first d, and than diff were applied to the same base
|
||||
//
|
||||
// WithDiff follows a set of rules represented by the following 3 by 3 table:
|
||||
//
|
||||
// | | this | |
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | | toAdd | toRemove | None
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// other | toAdd | X | - | toAdd
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | toRemove | - | X | toRemove
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | None | toAdd | toRemove | -
|
||||
//
|
||||
// Key:
|
||||
// - Don't add anything to the result
|
||||
// X Return an error
|
||||
// toAdd Add the UTXO into the toAdd collection of the result
|
||||
// toRemove Add the UTXO into the toRemove collection of the result
|
||||
//
|
||||
// Examples:
|
||||
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
|
||||
// WithDiff results in nothing being added
|
||||
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
|
||||
// WithDiff results in the UTXO being added to toRemove
|
||||
// first d, and than diff were applied to some base
|
||||
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
|
||||
result := UTXODiff{
|
||||
toAdd: make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
|
||||
toRemove: make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
|
||||
useMultiset: d.useMultiset,
|
||||
clone := d.clone()
|
||||
|
||||
err := clone.withDiffInPlace(diff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// All transactions in d.toAdd:
|
||||
// If they are not in diff.toRemove - should be added in result.toAdd
|
||||
// If they are in diff.toAdd - should throw an error
|
||||
// Otherwise - should be ignored
|
||||
for outpoint, utxoEntry := range d.toAdd {
|
||||
if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
result.toAdd.add(outpoint, utxoEntry)
|
||||
}
|
||||
if diffEntry, ok := diff.toAdd.get(outpoint); ok {
|
||||
// An exception is made for entries with unequal blue scores
|
||||
// as long as the appropriate entry exists in either d.toRemove
|
||||
// or diff.toRemove.
|
||||
// These are just "updates" to accepted blue score
|
||||
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
|
||||
diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
continue
|
||||
}
|
||||
return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in d.toRemove:
|
||||
// If they are not in diff.toAdd - should be added in result.toRemove
|
||||
// If they are in diff.toRemove - should throw an error
|
||||
// Otherwise - should be ignored
|
||||
for outpoint, utxoEntry := range d.toRemove {
|
||||
if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
result.toRemove.add(outpoint, utxoEntry)
|
||||
}
|
||||
if diffEntry, ok := diff.toRemove.get(outpoint); ok {
|
||||
// An exception is made for entries with unequal blue scores
|
||||
// as long as the appropriate entry exists in either d.toAdd
|
||||
// or diff.toAdd.
|
||||
// These are just "updates" to accepted blue score
|
||||
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
|
||||
d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) {
|
||||
continue
|
||||
}
|
||||
return nil, ruleError(ErrWithDiff, "WithDiff: outpoint both in d.toRemove and in other.toRemove")
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in diff.toAdd:
|
||||
// If they are not in d.toRemove - should be added in result.toAdd
|
||||
for outpoint, utxoEntry := range diff.toAdd {
|
||||
if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
result.toAdd.add(outpoint, utxoEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in diff.toRemove:
|
||||
// If they are not in d.toAdd - should be added in result.toRemove
|
||||
for outpoint, utxoEntry := range diff.toRemove {
|
||||
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
result.toRemove.add(outpoint, utxoEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply diff.diffMultiset to d.diffMultiset
|
||||
if d.useMultiset {
|
||||
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
return clone, nil
|
||||
}
|
||||
|
||||
// clone returns a clone of this utxoDiff
|
||||
func (d *UTXODiff) clone() *UTXODiff {
|
||||
clone := &UTXODiff{
|
||||
toAdd: d.toAdd.clone(),
|
||||
toRemove: d.toRemove.clone(),
|
||||
useMultiset: d.useMultiset,
|
||||
}
|
||||
if d.useMultiset {
|
||||
clone.diffMultiset = d.diffMultiset.Clone()
|
||||
toAdd: d.toAdd.clone(),
|
||||
toRemove: d.toRemove.clone(),
|
||||
}
|
||||
return clone
|
||||
}
|
||||
@@ -467,7 +353,7 @@ func (d *UTXODiff) clone() *UTXODiff {
|
||||
//
|
||||
// If d.useMultiset is true, this function MUST be
|
||||
// called with the DAG lock held.
|
||||
func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
|
||||
func (d *UTXODiff) AddEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) error {
|
||||
if d.toRemove.containsWithBlueScore(outpoint, entry.blockBlueScore) {
|
||||
d.toRemove.remove(outpoint)
|
||||
} else if _, exists := d.toAdd[outpoint]; exists {
|
||||
@@ -475,14 +361,6 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
|
||||
} else {
|
||||
d.toAdd.add(outpoint, entry)
|
||||
}
|
||||
|
||||
if d.useMultiset {
|
||||
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.diffMultiset = newMs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -490,7 +368,7 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
|
||||
//
|
||||
// If d.useMultiset is true, this function MUST be
|
||||
// called with the DAG lock held.
|
||||
func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
|
||||
func (d *UTXODiff) RemoveEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) error {
|
||||
if d.toAdd.containsWithBlueScore(outpoint, entry.blockBlueScore) {
|
||||
d.toAdd.remove(outpoint)
|
||||
} else if _, exists := d.toRemove[outpoint]; exists {
|
||||
@@ -498,21 +376,10 @@ func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
|
||||
} else {
|
||||
d.toRemove.add(outpoint, entry)
|
||||
}
|
||||
|
||||
if d.useMultiset {
|
||||
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.diffMultiset = newMs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d UTXODiff) String() string {
|
||||
if d.useMultiset {
|
||||
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
|
||||
}
|
||||
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
|
||||
}
|
||||
|
||||
@@ -532,97 +399,27 @@ type UTXOSet interface {
|
||||
fmt.Stringer
|
||||
diffFrom(other UTXOSet) (*UTXODiff, error)
|
||||
WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
|
||||
diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
|
||||
diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
|
||||
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
|
||||
AddTx(tx *domainmessage.MsgTx, blockBlueScore uint64) (ok bool, err error)
|
||||
clone() UTXOSet
|
||||
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
|
||||
Multiset() *ecc.Multiset
|
||||
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
|
||||
}
|
||||
|
||||
// diffFromTx is a common implementation for diffFromTx, that works
|
||||
// for both diff-based and full UTXO sets
|
||||
// Returns a diff that is equivalent to provided transaction,
|
||||
// or an error if provided transaction is not valid in the context of this UTXOSet
|
||||
func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
|
||||
diff := NewUTXODiff()
|
||||
isCoinbase := tx.IsCoinBase()
|
||||
if !isCoinbase {
|
||||
for _, txIn := range tx.TxIn {
|
||||
if entry, ok := u.Get(txIn.PreviousOutpoint); ok {
|
||||
err := diff.RemoveEntry(txIn.PreviousOutpoint, entry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
|
||||
"Transaction %s is invalid because spends outpoint %s that is not in utxo set",
|
||||
tx.TxID(), txIn.PreviousOutpoint))
|
||||
}
|
||||
}
|
||||
}
|
||||
for i, txOut := range tx.TxOut {
|
||||
entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
|
||||
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
|
||||
err := diff.AddEntry(outpoint, entry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
// diffFromAcceptedTx is a common implementation for diffFromAcceptedTx, that works
|
||||
// for both diff-based and full UTXO sets.
|
||||
// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore.
|
||||
// Returns an error if the provided transaction's entry is not valid in the context
|
||||
// of this UTXOSet.
|
||||
func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
|
||||
diff := NewUTXODiff()
|
||||
isCoinbase := tx.IsCoinBase()
|
||||
for i, txOut := range tx.TxOut {
|
||||
// Fetch any unaccepted transaction
|
||||
existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
|
||||
existingEntry, ok := u.Get(existingOutpoint)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint)
|
||||
}
|
||||
|
||||
// Remove unaccepted entries
|
||||
err := diff.RemoveEntry(existingOutpoint, existingEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add new entries with their accepting blue score
|
||||
newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
|
||||
err = diff.AddEntry(existingOutpoint, newEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return diff, nil
|
||||
Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool)
|
||||
}

// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}

// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}

// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := ecc.NewMultiset(ecc.S256())
multiset := secp256k1.NewMultiset()
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -631,7 +428,6 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}

@@ -660,44 +456,28 @@ func (fus *FullUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
// necessarily means there's an error).
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool, err error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
if !fus.containsInputs(tx) {
return false, nil
}

for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
}
func (fus *FullUTXOSet) AddTx(tx *domainmessage.MsgTx, blueScore uint64) (isAccepted bool, err error) {
if !fus.containsInputs(tx) {
return false, nil
}

for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
for _, txIn := range tx.TxIn {
fus.remove(txIn.PreviousOutpoint)
}

err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *domainmessage.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
fus.add(outpoint, entry)
}

return true, nil
}
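
// Editor's note: an illustrative sketch, not from the commits above. It shows how a
// caller might feed a transaction into a FullUTXOSet using the newer
// domainmessage-based AddTx signature shown in this diff.
func addTxSketch(fus *FullUTXOSet, tx *domainmessage.MsgTx, blueScore uint64) error {
	isAccepted, err := fus.AddTx(tx, blueScore)
	if err != nil {
		return err
	}
	if !isAccepted {
		// AddTx reports false without an error when one of tx's inputs is not
		// present in the set, e.g. a double-spend or an orphan transaction.
		return errors.Errorf("transaction %s was not accepted", tx.TxID())
	}
	return nil
}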

// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(fus, tx, acceptingBlueScore)
}

func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
func (fus *FullUTXOSet) containsInputs(tx *domainmessage.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
outpoint := *domainmessage.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
if !fus.contains(outpoint) {
return false
}
@@ -706,70 +486,17 @@ func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
return true
}

func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(fus, tx, acceptingBlueScore)
}

// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}

// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
func (fus *FullUTXOSet) Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
utxoEntry, ok := fus.utxoCollection[outpoint]
return utxoEntry, ok
}

// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}

// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}

// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}

// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}

// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -813,13 +540,12 @@ func (dus *DiffUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase && !dus.containsInputs(tx) {
func (dus *DiffUTXOSet) AddTx(tx *domainmessage.MsgTx, blockBlueScore uint64) (bool, error) {
if !dus.containsInputs(tx) {
return false, nil
}

err := dus.appendTx(tx, blockBlueScore, isCoinbase)
err := dus.appendTx(tx, blockBlueScore)
if err != nil {
return false, err
}
@@ -827,23 +553,21 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
return true, nil
}

func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
if err != nil {
return err
}
func (dus *DiffUTXOSet) appendTx(tx *domainmessage.MsgTx, blockBlueScore uint64) error {
for _, txIn := range tx.TxIn {
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
}

isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
outpoint := *domainmessage.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)

err := dus.UTXODiff.AddEntry(outpoint, entry)
@@ -854,9 +578,9 @@ func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinba
return nil
}

func (dus *DiffUTXOSet) containsInputs(tx *wire.MsgTx) bool {
func (dus *DiffUTXOSet) containsInputs(tx *domainmessage.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
outpoint := *domainmessage.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
isInBase := dus.base.contains(outpoint)
isInDiffToAdd := dus.UTXODiff.toAdd.contains(outpoint)
isInDiffToRemove := dus.UTXODiff.toRemove.contains(outpoint)
@@ -881,31 +605,12 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}

if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}

if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
dus.UTXODiff = NewUTXODiff()
return nil
}
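
// Editor's note: an illustrative sketch, not from the commits above. A DiffUTXOSet
// can serve as a cheap scratch layer over a FullUTXOSet; meldToBase (shown above)
// folds the accumulated toAdd/toRemove entries into the base and resets the diff.
func commitScratchViewSketch(base *FullUTXOSet, txs []*domainmessage.MsgTx, blueScore uint64) error {
	scratch := NewDiffUTXOSet(base, NewUTXODiff())
	for _, tx := range txs {
		if _, err := scratch.AddTx(tx, blueScore); err != nil {
			return err
		}
	}
	// Commit the scratch layer onto the underlying FullUTXOSet.
	return scratch.meldToBase()
}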

// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(dus, tx, acceptingBlueScore)
}

func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(dus, tx, acceptingBlueScore)
}

func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}

// clone returns a clone of this UTXO Set
@@ -913,9 +618,15 @@ func (dus *DiffUTXOSet) clone() UTXOSet {
return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}

// cloneWithoutBase returns a *DiffUTXOSet with same
// base as this *DiffUTXOSet and a cloned diff.
func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet {
return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
}

// Get returns the UTXOEntry associated with provided outpoint in this UTXOSet.
// Returns false in second output if this UTXOEntry was not found
func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
func (dus *DiffUTXOSet) Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
if toRemoveEntry, ok := dus.UTXODiff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// These are just "updates" to accepted blue score
@@ -930,42 +641,3 @@ func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}

// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}

// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}

func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}

func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}
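
// Editor's note: an illustrative sketch, not from the commits above. The ECMH
// multiset helpers shown above hash each (entry, outpoint) pair into its own
// multiset and then take a union or subtraction, so updates commute and the final
// hash does not depend on insertion order. Replacing an entry is therefore just a
// remove followed by an add.
func replaceInMultisetSketch(ms *ecc.Multiset, outpoint *wire.Outpoint, oldEntry, newEntry *UTXOEntry) (*ecc.Multiset, error) {
	ms, err := removeUTXOFromMultiset(ms, oldEntry, outpoint)
	if err != nil {
		return nil, err
	}
	return addUTXOToMultiset(ms, newEntry, outpoint)
}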

@@ -1,25 +1,23 @@
package blockdag

import (
"math"
"reflect"
"testing"

"github.com/kaspanet/kaspad/util/subnetworkid"

"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)

// TestUTXOCollection makes sure that utxoCollection cloning and string representations work as expected.
func TestUTXOCollection(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
outpoint0 := *wire.NewOutpoint(txID0, 0)
outpoint1 := *wire.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
outpoint0 := *domainmessage.NewOutpoint(txID0, 0)
outpoint1 := *domainmessage.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)

// For each of the following test cases, we will:
// .String() the given collection and compare it to expectedStringWithMultiset
@@ -75,54 +73,45 @@ func TestUTXOCollection(t *testing.T) {
func TestUTXODiff(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
outpoint0 := *wire.NewOutpoint(txID0, 0)
outpoint1 := *wire.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
outpoint0 := *domainmessage.NewOutpoint(txID0, 0)
outpoint1 := *domainmessage.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)

for i := 0; i < 2; i++ {
withMultiset := i == 0
// Test utxoDiff creation
var diff *UTXODiff
if withMultiset {
diff = NewUTXODiff()
} else {
diff = NewUTXODiffWithoutMultiset()
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff creation

err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
diff := NewUTXODiff()

err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}

// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}

// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
if withMultiset {
expectedDiffString = "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], Multiset-Hash: 7cb61e48005b0c817211d04589d719bff87d86a6a6ce2454515f57265382ded7"
}
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}

// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}

// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
}

@@ -130,14 +119,14 @@ func TestUTXODiff(t *testing.T) {
// Each test case represents a cell in the two tables outlined in the documentation for utxoDiff.
func TestUTXODiffRules(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
outpoint0 := *wire.NewOutpoint(txID0, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 10)
utxoEntry2 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 20)
outpoint0 := *domainmessage.NewOutpoint(txID0, 0)
utxoEntry1 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 10)
utxoEntry2 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 20)

// For each of the following test cases, we will:
// this.diffFrom(other) and compare it to expectedDiffFromResult
// this.WithDiff(other) and compare it to expectedWithDiffResult
// this.WithDiffInPlace(other) and compare it to expectedWithDiffResult
// this.withDiffInPlace(other) and compare it to expectedWithDiffResult
//
// Note: an expected nil result means that we expect the respective operation to fail
// See the following spreadsheet for a summary of all test-cases:
@@ -542,171 +531,115 @@ func TestUTXODiffRules(t *testing.T) {
}

for _, test := range tests {
this := addMultisetToDiff(t, test.this)
other := addMultisetToDiff(t, test.other)
expectedDiffFromResult := addMultisetToDiff(t, test.expectedDiffFromResult)
expectedWithDiffResult := addMultisetToDiff(t, test.expectedWithDiffResult)

// diffFrom from this to other
diffResult, err := this.diffFrom(other)
// diffFrom from test.this to test.other
diffResult, err := test.this.diffFrom(test.other)

// Test whether diffFrom returned an error
isDiffFromOk := err == nil
expectedIsDiffFromOk := expectedDiffFromResult != nil
expectedIsDiffFromOk := test.expectedDiffFromResult != nil
if isDiffFromOk != expectedIsDiffFromOk {
t.Errorf("unexpected diffFrom error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
}

// If not error, test the diffFrom result
if isDiffFromOk && !expectedDiffFromResult.equal(diffResult) {
if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedDiffFromResult, diffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
}

// Make sure that WithDiff after diffFrom results in the original other
// Make sure that WithDiff after diffFrom results in the original test.other
if isDiffFromOk {
otherResult, err := this.WithDiff(diffResult)
otherResult, err := test.this.WithDiff(diffResult)
if err != nil {
t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}

// WithDiff from this to other
withDiffResult, err := this.WithDiff(other)
// WithDiff from test.this to test.other
withDiffResult, err := test.this.WithDiff(test.other)

// Test whether WithDiff returned an error
isWithDiffOk := err == nil
expectedIsWithDiffOk := expectedWithDiffResult != nil
expectedIsWithDiffOk := test.expectedWithDiffResult != nil
if isWithDiffOk != expectedIsWithDiffOk {
t.Errorf("unexpected WithDiff error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
}

// If not error, test the WithDiff result
if isWithDiffOk && !withDiffResult.equal(expectedWithDiffResult) {
if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, withDiffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
}

// Repeat WithDiff check this time using WithDiffInPlace
thisClone := this.clone()
err = thisClone.WithDiffInPlace(other)
// Repeat WithDiff check test.this time using withDiffInPlace
thisClone := test.this.clone()
err = thisClone.withDiffInPlace(test.other)

// Test whether WithDiffInPlace returned an error
// Test whether withDiffInPlace returned an error
isWithDiffInPlaceOk := err == nil
expectedIsWithDiffInPlaceOk := expectedWithDiffResult != nil
expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil
if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk {
t.Errorf("unexpected WithDiffInPlace error in test \"%s\". "+
t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk)
}

// If not error, test the WithDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(expectedWithDiffResult) {
t.Errorf("unexpected WithDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, thisClone)
// If not error, test the withDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone)
}

// Make sure that diffFrom after WithDiff results in the original other
// Make sure that diffFrom after WithDiff results in the original test.other
if isWithDiffOk {
otherResult, err := this.diffFrom(withDiffResult)
otherResult, err := test.this.diffFrom(withDiffResult)
if err != nil {
t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
}
}

func areMultisetsEqual(a *ecc.Multiset, b *ecc.Multiset) bool {
aX, aY := a.Point()
bX, bY := b.Point()
return aX.Cmp(bX) == 0 && aY.Cmp(bY) == 0
}

func (d *UTXODiff) equal(other *UTXODiff) bool {
if d == nil || other == nil {
return d == other
}

return reflect.DeepEqual(d.toAdd, other.toAdd) &&
reflect.DeepEqual(d.toRemove, other.toRemove) &&
areMultisetsEqual(d.diffMultiset, other.diffMultiset)
reflect.DeepEqual(d.toRemove, other.toRemove)
}

func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection) &&
areMultisetsEqual(fus.UTXOMultiset, other.UTXOMultiset)
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
}

func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
return dus.base.equal(other.base) && dus.UTXODiff.equal(other.UTXODiff)
}

func addMultisetToDiff(t *testing.T, diff *UTXODiff) *UTXODiff {
if diff == nil {
return nil
}
diffWithMs := NewUTXODiff()
for outpoint, entry := range diff.toAdd {
err := diffWithMs.AddEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
for outpoint, entry := range diff.toRemove {
err := diffWithMs.RemoveEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.removeEntry: %s", err)
}
}
return diffWithMs
}

func addMultisetToFullUTXOSet(t *testing.T, fus *FullUTXOSet) *FullUTXOSet {
if fus == nil {
return nil
}
fusWithMs := NewFullUTXOSet()
for outpoint, entry := range fus.utxoCollection {
err := fusWithMs.addAndUpdateMultiset(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
return fusWithMs
}

func addMultisetToDiffUTXOSet(t *testing.T, diffSet *DiffUTXOSet) *DiffUTXOSet {
if diffSet == nil {
return nil
}
diffWithMs := addMultisetToDiff(t, diffSet.UTXODiff)
baseWithMs := addMultisetToFullUTXOSet(t, diffSet.base)
return NewDiffUTXOSet(baseWithMs, diffWithMs)
}

// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
outpoint0 := *wire.NewOutpoint(txID0, 0)
outpoint1 := *wire.NewOutpoint(txID1, 0)
txOut0 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 10}
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
outpoint0 := *domainmessage.NewOutpoint(txID0, 0)
outpoint1 := *domainmessage.NewOutpoint(txID1, 0)
txOut0 := &domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}
txOut1 := &domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}

// Test fullUTXOSet creation
emptySet := NewFullUTXOSet()
@@ -728,14 +661,14 @@ func TestFullUTXOSet(t *testing.T) {
}

// Test fullUTXOSet addTx
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: 0}, Sequence: 0}
transaction0 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0})
txIn0 := &domainmessage.TxIn{SignatureScript: []byte{}, PreviousOutpoint: domainmessage.Outpoint{TxID: *txID0, Index: 0}, Sequence: 0}
transaction0 := domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{txIn0}, []*domainmessage.TxOut{txOut0})
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("AddTx unexpectedly failed: %s", err)
} else if isAccepted {
t.Errorf("addTx unexpectedly succeeded")
}
emptySet = addMultisetToFullUTXOSet(t, &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}})
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("addTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
@@ -761,16 +694,16 @@ func TestFullUTXOSet(t *testing.T) {
func TestDiffUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
outpoint0 := *wire.NewOutpoint(txID0, 0)
outpoint1 := *wire.NewOutpoint(txID1, 0)
txOut0 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 10}
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
outpoint0 := *domainmessage.NewOutpoint(txID0, 0)
outpoint1 := *domainmessage.NewOutpoint(txID1, 0)
txOut0 := &domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}
txOut1 := &domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}

// Test diffUTXOSet creation
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
@@ -828,7 +761,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ]}",
expectedCollection: utxoCollection{},
},
{
@@ -847,7 +780,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ], Multiset-Hash:da4768bd0359c3426268d6707c1fc17a68c45ef1ea734331b07568418234487f}",
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ]}",
expectedCollection: utxoCollection{outpoint0: utxoEntry0},
},
{
@@ -860,7 +793,7 @@ func TestDiffUTXOSet(t *testing.T) {
},
},
expectedMeldSet: nil,
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:046242cb1bb1e6d3fd91d0f181e1b2d4a597ac57fa2584fc3c2eb0e0f46c9369}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
expectedMeldToBaseError: "Couldn't remove outpoint 0000000000000000000000000000000000000000000000000000000000000000:0 because it doesn't exist in the DiffUTXOSet base",
},
@@ -885,7 +818,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ], Multiset-Hash:556cc61fd4d7e74d7807ca2298c5320375a6a20310a18920e54667220924baff}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ]}",
expectedCollection: utxoCollection{
outpoint0: utxoEntry0,
outpoint1: utxoEntry1,
@@ -909,24 +842,21 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
},
}

for _, test := range tests {
diffSet := addMultisetToDiffUTXOSet(t, test.diffSet)
expectedMeldSet := addMultisetToDiffUTXOSet(t, test.expectedMeldSet)

// Test string representation
setString := diffSet.String()
setString := test.diffSet.String()
if setString != test.expectedString {
t.Errorf("unexpected string in test \"%s\". "+
"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
}

// Test meldToBase
meldSet := diffSet.clone().(*DiffUTXOSet)
meldSet := test.diffSet.clone().(*DiffUTXOSet)
err := meldSet.meldToBase()
errString := ""
if err != nil {
@@ -938,27 +868,27 @@ func TestDiffUTXOSet(t *testing.T) {
if err != nil {
continue
}
if !meldSet.equal(expectedMeldSet) {
if !meldSet.equal(test.expectedMeldSet) {
t.Errorf("unexpected melded set in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedMeldSet, meldSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
}

// Test collection
setCollection, err := diffSet.collection()
setCollection, err := test.diffSet.collection()
if err != nil {
t.Errorf("Error getting diffSet collection: %s", err)
t.Errorf("Error getting test.diffSet collection: %s", err)
} else if !reflect.DeepEqual(setCollection, test.expectedCollection) {
t.Errorf("unexpected set collection in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
}

// Test cloning
clonedSet := diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, diffSet) {
clonedSet := test.diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, test.diffSet) {
t.Errorf("unexpected set clone in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, diffSet, clonedSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
}
if clonedSet == diffSet {
if clonedSet == test.diffSet {
t.Errorf("cloned set is reference-equal to the original")
}
}
@@ -1016,32 +946,29 @@ func TestUTXOSetDiffRules(t *testing.T) {

// TestDiffUTXOSet_addTx makes sure that diffUTXOSet addTx works as expected
func TestDiffUTXOSet_addTx(t *testing.T) {
// coinbaseTX is coinbase. As such, it has exactly one input with hash zero and MaxUInt32 index
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
txOut0 := &domainmessage.TxOut{ScriptPubKey: []byte{0}, Value: 10}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
coinbaseTX := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
coinbaseTX := domainmessage.NewSubnetworkMsgTx(1, []*domainmessage.TxIn{}, []*domainmessage.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)

// transaction1 spends coinbaseTX
id1 := coinbaseTX.TxID()
outpoint1 := *wire.NewOutpoint(id1, 0)
txIn1 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: outpoint1, Sequence: 0}
txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 20}
outpoint1 := *domainmessage.NewOutpoint(id1, 0)
txIn1 := &domainmessage.TxIn{SignatureScript: []byte{}, PreviousOutpoint: outpoint1, Sequence: 0}
txOut1 := &domainmessage.TxOut{ScriptPubKey: []byte{1}, Value: 20}
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
transaction1 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})
transaction1 := domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{txIn1}, []*domainmessage.TxOut{txOut1})

// transaction2 spends transaction1
id2 := transaction1.TxID()
outpoint2 := *wire.NewOutpoint(id2, 0)
txIn2 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: outpoint2, Sequence: 0}
txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 30}
outpoint2 := *domainmessage.NewOutpoint(id2, 0)
txIn2 := &domainmessage.TxIn{SignatureScript: []byte{}, PreviousOutpoint: outpoint2, Sequence: 0}
txOut2 := &domainmessage.TxOut{ScriptPubKey: []byte{2}, Value: 30}
utxoEntry2 := NewUTXOEntry(txOut2, false, 2)
transaction2 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})
transaction2 := domainmessage.NewNativeMsgTx(1, []*domainmessage.TxIn{txIn2}, []*domainmessage.TxOut{txOut2})

// outpoint3 is the outpoint for transaction2
id3 := transaction2.TxID()
outpoint3 := *wire.NewOutpoint(id3, 0)
outpoint3 := *domainmessage.NewOutpoint(id3, 0)

// For each of the following test cases, we will:
// 1. startSet.addTx() all the transactions in toAdd, in order, with the initial block height startHeight
@@ -1050,14 +977,14 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
name string
startSet *DiffUTXOSet
startHeight uint64
toAdd []*wire.MsgTx
toAdd []*domainmessage.MsgTx
expectedSet *DiffUTXOSet
}{
{
name: "add coinbase transaction to empty set",
startSet: NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
startHeight: 0,
toAdd: []*wire.MsgTx{coinbaseTX},
toAdd: []*domainmessage.MsgTx{coinbaseTX},
expectedSet: &DiffUTXOSet{
base: &FullUTXOSet{utxoCollection: utxoCollection{}},
UTXODiff: &UTXODiff{
@@ -1070,7 +997,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
name: "add regular transaction to empty set",
startSet: NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
startHeight: 0,
toAdd: []*wire.MsgTx{transaction1},
toAdd: []*domainmessage.MsgTx{transaction1},
expectedSet: &DiffUTXOSet{
base: &FullUTXOSet{utxoCollection: utxoCollection{}},
UTXODiff: &UTXODiff{
@@ -1089,7 +1016,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
},
},
startHeight: 1,
toAdd: []*wire.MsgTx{transaction1},
toAdd: []*domainmessage.MsgTx{transaction1},
expectedSet: &DiffUTXOSet{
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
UTXODiff: &UTXODiff{
@@ -1108,7 +1035,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
},
},
startHeight: 1,
toAdd: []*wire.MsgTx{transaction1},
toAdd: []*domainmessage.MsgTx{transaction1},
expectedSet: &DiffUTXOSet{
base: NewFullUTXOSet(),
UTXODiff: &UTXODiff{
@@ -1127,7 +1054,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
},
},
startHeight: 1,
toAdd: []*wire.MsgTx{transaction1},
toAdd: []*domainmessage.MsgTx{transaction1},
expectedSet: &DiffUTXOSet{
base: NewFullUTXOSet(),
UTXODiff: &UTXODiff{
@@ -1146,7 +1073,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
},
},
startHeight: 1,
toAdd: []*wire.MsgTx{transaction1, transaction2},
toAdd: []*domainmessage.MsgTx{transaction1, transaction2},
expectedSet: &DiffUTXOSet{
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
UTXODiff: &UTXODiff{
@@ -1159,10 +1086,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {

testLoop:
for _, test := range tests {
startSet := addMultisetToDiffUTXOSet(t, test.startSet)
expectedSet := addMultisetToDiffUTXOSet(t, test.expectedSet)

diffSet := startSet.clone()
diffSet := test.startSet.clone()

// Apply all transactions to diffSet, in order, with the initial block height startHeight
for i, transaction := range test.toAdd {
@@ -1174,89 +1098,14 @@ testLoop:
}
}

// Make sure that the result diffSet equals to the expectedSet
if !diffSet.(*DiffUTXOSet).equal(expectedSet) {
// Make sure that the result diffSet equals to test.expectedSet
if !diffSet.(*DiffUTXOSet).equal(test.expectedSet) {
t.Errorf("unexpected diffSet in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedSet, diffSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
}
}
}

func TestDiffFromTx(t *testing.T) {
fus := addMultisetToFullUTXOSet(t, &FullUTXOSet{
utxoCollection: utxoCollection{},
})

txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
cbTx := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
if isAccepted, err := fus.AddTx(cbTx, 1); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", cbTx.TxID())
}
acceptingBlueScore := uint64(2)
cbOutpoint := wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}
txIns := []*wire.TxIn{{
PreviousOutpoint: cbOutpoint,
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
txOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
diff, err := fus.diffFromTx(tx, acceptingBlueScore)
if err != nil {
t.Errorf("diffFromTx: %v", err)
}
if !reflect.DeepEqual(diff.toAdd, utxoCollection{
wire.Outpoint{TxID: *tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2),
}) {
t.Errorf("diff.toAdd doesn't have the expected values")
}

if !reflect.DeepEqual(diff.toRemove, utxoCollection{
wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1),
}) {
t.Errorf("diff.toRemove doesn't have the expected values")
}

//Test that we get an error if we don't have the outpoint inside the utxo set
invalidTxIns := []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{TxID: daghash.TxID{}, Index: 0},
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
invalidTxOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts)
_, err = fus.diffFromTx(invalidTx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}

//Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := addMultisetToDiff(t, &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
})
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.TxID())
}
_, err = dus.diffFromTx(tx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
}

// collection returns a collection of all UTXOs in this set
func (fus *FullUTXOSet) collection() utxoCollection {
return fus.utxoCollection.clone()
@@ -1276,16 +1125,16 @@ func (dus *DiffUTXOSet) collection() (utxoCollection, error) {
func TestUTXOSetAddEntry(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
outpoint0 := wire.NewOutpoint(txID0, 0)
outpoint1 := wire.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
outpoint0 := domainmessage.NewOutpoint(txID0, 0)
outpoint1 := domainmessage.NewOutpoint(txID1, 0)
utxoEntry0 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&domainmessage.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)

utxoDiff := NewUTXODiff()

tests := []struct {
name string
outpointToAdd *wire.Outpoint
outpointToAdd *domainmessage.Outpoint
utxoEntryToAdd *UTXOEntry
expectedUTXODiff *UTXODiff
expectedError string
@@ -1321,7 +1170,6 @@ func TestUTXOSetAddEntry(t *testing.T) {
}

for _, test := range tests {
expectedUTXODiff := addMultisetToDiff(t, test.expectedUTXODiff)
err := utxoDiff.AddEntry(*test.outpointToAdd, test.utxoEntryToAdd)
errString := ""
if err != nil {
@@ -1330,9 +1178,9 @@ func TestUTXOSetAddEntry(t *testing.T) {
if errString != test.expectedError {
t.Fatalf("utxoDiff.AddEntry: unexpected err in test \"%s\". Expected: %s but got: %s", test.name, test.expectedError, err)
}
if err == nil && !utxoDiff.equal(expectedUTXODiff) {
if err == nil && !utxoDiff.equal(test.expectedUTXODiff) {
t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test \"%s\". "+
"Expected: %v, got: %v", test.name, expectedUTXODiff, utxoDiff)
"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
}
}
}

@@ -10,14 +10,16 @@ import (
"sort"
"time"

"github.com/kaspanet/kaspad/util/mstime"

"github.com/pkg/errors"

"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)

const (
@@ -45,7 +47,7 @@ const (

// isNullOutpoint determines whether or not a previous transaction outpoint
// is set.
func isNullOutpoint(outpoint *wire.Outpoint) bool {
func isNullOutpoint(outpoint *domainmessage.Outpoint) bool {
if outpoint.Index == math.MaxUint32 && outpoint.TxID == daghash.ZeroTxID {
return true
}
@@ -56,12 +58,12 @@ func isNullOutpoint(outpoint *wire.Outpoint) bool {
// met, meaning that all the inputs of a given transaction have reached a
// blue score or time sufficient for their relative lock-time maturity.
func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
medianTimePast time.Time) bool {
medianTimePast mstime.Time) bool {

// If either the seconds, or blue score relative-lock time has not yet
// If either the milliseconds, or blue score relative-lock time has not yet
// reached, then the transaction is not yet mature according to its
// sequence locks.
if sequenceLock.Seconds >= medianTimePast.Unix() ||
if sequenceLock.Milliseconds >= medianTimePast.UnixMilliseconds() ||
sequenceLock.BlockBlueScore >= int64(blockBlueScore) {
return false
}
@@ -70,7 +72,7 @@ func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
}
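
// Editor's note: an illustrative sketch, not from the commits above. With the
// mstime-based signature shown here, a relative lock is only active once both the
// millisecond lock and the blue-score lock are strictly in the past. The concrete
// values and the mstime.UnixMilliseconds constructor below are assumptions made
// for the example.
func sequenceLockExampleSketch() bool {
	lock := &SequenceLock{Milliseconds: 3000, BlockBlueScore: -1}
	medianTimePast := mstime.UnixMilliseconds(10000)
	// Returns true here: 3000 < 10000 and the blue-score lock (-1) is already satisfied.
	return SequenceLockActive(lock, 50, medianTimePast)
}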

// IsFinalizedTransaction determines whether or not a transaction is finalized.
func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime time.Time) bool {
func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime mstime.Time) bool {
msgTx := tx.MsgTx()

// Lock time of zero means the transaction is finalized.
@@ -87,7 +89,7 @@ func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime time.T
if lockTime < txscript.LockTimeThreshold {
blockTimeOrBlueScore = int64(blockBlueScore)
} else {
blockTimeOrBlueScore = blockTime.Unix()
blockTimeOrBlueScore = blockTime.UnixMilliseconds()
}
if int64(lockTime) < blockTimeOrBlueScore {
return true
@@ -133,15 +135,6 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
return ruleError(ErrNoTxInputs, "transaction has no inputs")
}

// A transaction must not exceed the maximum allowed block mass when
// serialized.
serializedTxSize := msgTx.SerializeSize()
if serializedTxSize*MassPerTxByte > wire.MaxMassPerTx {
str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, wire.MaxMassPerBlock)
return ruleError(ErrTxMassTooHigh, str)
}

// Ensure the transaction amounts are in range. Each transaction
// output must not be negative or more than the max allowed per
// transaction. Also, the total of all outputs must abide by the same
@@ -179,7 +172,7 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
}

// Check for duplicate transaction inputs.
existingTxOut := make(map[wire.Outpoint]struct{})
existingTxOut := make(map[domainmessage.Outpoint]struct{})
for _, txIn := range msgTx.TxIn {
if _, exists := existingTxOut[txIn.PreviousOutpoint]; exists {
return ruleError(ErrDuplicateTxInputs, "transaction "+
@@ -220,10 +213,7 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
}

// Transactions in native, registry and coinbase subnetworks must have Gas = 0
if (msgTx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
msgTx.SubnetworkID.IsBuiltIn()) &&
msgTx.Gas > 0 {

if msgTx.SubnetworkID.IsBuiltInOrNative() && msgTx.Gas > 0 {
return ruleError(ErrInvalidGas, "transaction in the native or "+
"registry subnetworks has gas > 0 ")
}
@@ -263,7 +253,7 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
|
||||
// The flags modify the behavior of this function as follows:
|
||||
// - BFNoPoWCheck: The check to ensure the block hash is less than the target
|
||||
// difficulty is not performed.
|
||||
func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFlags) error {
|
||||
func (dag *BlockDAG) checkProofOfWork(header *domainmessage.BlockHeader, flags BehaviorFlags) error {
|
||||
// The target difficulty must be larger than zero.
|
||||
target := util.CompactToBig(header.Bits)
|
||||
if target.Sign() <= 0 {
|
||||
@@ -273,9 +263,9 @@ func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFl
|
||||
}
|
||||
|
||||
// The target difficulty must be less than the maximum allowed.
|
||||
if target.Cmp(dag.dagParams.PowMax) > 0 {
|
||||
if target.Cmp(dag.Params.PowMax) > 0 {
|
||||
str := fmt.Sprintf("block target difficulty of %064x is "+
|
||||
"higher than max of %064x", target, dag.dagParams.PowMax)
|
||||
"higher than max of %064x", target, dag.Params.PowMax)
|
||||
return ruleError(ErrUnexpectedDifficulty, str)
|
||||
}
|
||||
|
||||
@@ -303,9 +293,9 @@ func ValidateTxMass(tx *util.Tx, utxoSet UTXOSet) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if txMass > wire.MaxMassPerBlock {
|
||||
if txMass > domainmessage.MaxMassPerBlock {
|
||||
str := fmt.Sprintf("tx %s has mass %d, which is above the "+
|
||||
"allowed limit of %d", tx.ID(), txMass, wire.MaxMassPerBlock)
|
||||
"allowed limit of %d", tx.ID(), txMass, domainmessage.MaxMassPerBlock)
|
||||
return ruleError(ErrTxMassTooHigh, str)
|
||||
}
|
||||
return nil
|
||||
@@ -329,9 +319,9 @@ func CalcBlockMass(pastUTXO UTXOSet, transactions []*util.Tx) (uint64, error) {
|
||||
|
||||
// We could potentially overflow the accumulator so check for
|
||||
// overflow as well.
|
||||
if totalMass < txMass || totalMass > wire.MaxMassPerBlock {
|
||||
if totalMass < txMass || totalMass > domainmessage.MaxMassPerBlock {
|
||||
str := fmt.Sprintf("block has total mass %d, which is "+
|
||||
"above the allowed limit of %d", totalMass, wire.MaxMassPerBlock)
|
||||
"above the allowed limit of %d", totalMass, domainmessage.MaxMassPerBlock)
|
||||
return 0, ruleError(ErrBlockMassTooHigh, str)
|
||||
}
|
||||
}
|
||||
@@ -400,17 +390,18 @@ func CalcTxMass(tx *util.Tx, previousScriptPubKeys [][]byte) uint64 {
|
||||
//
|
||||
// The flags do not modify the behavior of this function directly, however they
|
||||
// are needed to pass along to checkProofOfWork.
|
||||
func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags BehaviorFlags) (delay time.Duration, err error) {
|
||||
func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFlags) (delay time.Duration, err error) {
|
||||
// Ensure the proof of work bits in the block header is in min/max range
|
||||
// and the block hash is less than the target value described by the
|
||||
// bits.
|
||||
header := &block.MsgBlock().Header
|
||||
err = dag.checkProofOfWork(header, flags)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(header.ParentHashes) == 0 {
|
||||
if !header.BlockHash().IsEqual(dag.dagParams.GenesisHash) {
|
||||
if !header.BlockHash().IsEqual(dag.Params.GenesisHash) {
|
||||
return 0, ruleError(ErrNoParents, "block has no parents")
|
||||
}
|
||||
} else {
|
||||
@@ -420,23 +411,11 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
 		}
 	}

-	// A block timestamp must not have a greater precision than one second.
-	// This check is necessary because Go time.Time values support
-	// nanosecond precision whereas the consensus rules only apply to
-	// seconds and it's much nicer to deal with standard Go time values
-	// instead of converting to seconds everywhere.
-	if !header.Timestamp.Equal(time.Unix(header.Timestamp.Unix(), 0)) {
-		str := fmt.Sprintf("block timestamp of %s has a higher "+
-			"precision than one second", header.Timestamp)
-		return 0, ruleError(ErrInvalidTime, str)
-	}
-
 	// Ensure the block time is not too far in the future. If it's too far, return
 	// the duration of time that should be waited before the block becomes valid.
 	// This check needs to be last as it does not return an error but rather marks the
 	// header as delayed (and valid).
-	maxTimestamp := dag.AdjustedTime().Add(time.Second *
-		time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
+	maxTimestamp := dag.Now().Add(time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock)
 	if header.Timestamp.After(maxTimestamp) {
 		return header.Timestamp.Sub(maxTimestamp), nil
 	}
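The rewritten future-timestamp rule no longer rejects the block outright; it returns how long to wait before the block becomes valid. A sketch of that calculation, with the parameter values treated as assumptions:

```go
package main

import (
	"fmt"
	"time"
)

// blockDelay returns 0 for an acceptable timestamp, or the duration the block
// must be held before it becomes valid when the timestamp exceeds
// now + deviationTolerance*targetTimePerBlock.
func blockDelay(now, blockTime time.Time, deviationTolerance uint64, targetTimePerBlock time.Duration) time.Duration {
	maxTimestamp := now.Add(time.Duration(deviationTolerance) * targetTimePerBlock)
	if blockTime.After(maxTimestamp) {
		return blockTime.Sub(maxTimestamp)
	}
	return 0
}

func main() {
	now := time.Unix(1_600_000_000, 0)
	// Assumed parameters: a tolerance of 132 blocks at one block per second.
	delay := blockDelay(now, now.Add(3*time.Minute), 132, time.Second)
	fmt.Println(delay) // 48s
}
```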
@@ -445,7 +424,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
 	}

 //checkBlockParentsOrder ensures that the block's parents are ordered by hash
-func checkBlockParentsOrder(header *wire.BlockHeader) error {
+func checkBlockParentsOrder(header *domainmessage.BlockHeader) error {
 	sortedHashes := make([]*daghash.Hash, header.NumParentBlocks())
 	for i, hash := range header.ParentHashes {
 		sortedHashes[i] = hash
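checkBlockParentsOrder enforces a canonical ordering so that two blocks with the same parent set always serialize identically. A hedged sketch of the same rule over raw hash bytes (the real code compares daghash.Hash values; plain byte comparison is an assumption here):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// checkParentsSorted reports an error unless the parent hashes already appear
// in ascending byte order.
func checkParentsSorted(parentHashes [][]byte) error {
	isSorted := sort.SliceIsSorted(parentHashes, func(i, j int) bool {
		return bytes.Compare(parentHashes[i], parentHashes[j]) < 0
	})
	if !isSorted {
		return fmt.Errorf("block parents are not ordered by hash")
	}
	return nil
}

func main() {
	fmt.Println(checkParentsSorted([][]byte{{0x01, 0xaa}, {0x02, 0x00}})) // <nil>
	fmt.Println(checkParentsSorted([][]byte{{0x02, 0x00}, {0x01, 0xaa}})) // error
}
```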
@@ -465,87 +444,182 @@ func checkBlockParentsOrder(header *wire.BlockHeader) error {
 // The flags do not modify the behavior of this function directly, however they
 // are needed to pass along to checkBlockHeaderSanity.
 func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (time.Duration, error) {
-	msgBlock := block.MsgBlock()
-	header := &msgBlock.Header
-	delay, err := dag.checkBlockHeaderSanity(header, flags)
+	delay, err := dag.checkBlockHeaderSanity(block, flags)
 	if err != nil {
 		return 0, err
 	}
+	err = dag.checkBlockContainsAtLeastOneTransaction(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkBlockContainsLessThanMaxBlockMassTransactions(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkFirstBlockTransactionIsCoinbase(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkBlockContainsOnlyOneCoinbase(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkBlockTransactionOrder(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkNoNonNativeTransactions(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkBlockTransactionSanity(block)
+	if err != nil {
+		return 0, err
+	}
+	err = dag.checkBlockHashMerkleRoot(block)
+	if err != nil {
+		return 0, err
+	}

-	// A block must have at least one transaction.
-	numTx := len(msgBlock.Transactions)
+	// The following check will be fairly quick since the transaction IDs
+	// are already cached due to building the merkle tree above.
+	err = dag.checkBlockDuplicateTransactions(block)
+	if err != nil {
+		return 0, err
+	}
+
+	err = dag.checkBlockDoubleSpends(block)
+	if err != nil {
+		return 0, err
+	}
+	return delay, nil
+}
+
+func (dag *BlockDAG) checkBlockContainsAtLeastOneTransaction(block *util.Block) error {
+	transactions := block.Transactions()
+	numTx := len(transactions)
 	if numTx == 0 {
-		return 0, ruleError(ErrNoTransactions, "block does not contain "+
+		return ruleError(ErrNoTransactions, "block does not contain "+
 			"any transactions")
 	}
+	return nil
+}

+func (dag *BlockDAG) checkBlockContainsLessThanMaxBlockMassTransactions(block *util.Block) error {
 	// A block must not have more transactions than the max block mass or
 	// else it is certainly over the block mass limit.
-	if numTx > wire.MaxMassPerBlock {
+	transactions := block.Transactions()
+	numTx := len(transactions)
+	if numTx > domainmessage.MaxMassPerBlock {
 		str := fmt.Sprintf("block contains too many transactions - "+
-			"got %d, max %d", numTx, wire.MaxMassPerBlock)
-		return 0, ruleError(ErrBlockMassTooHigh, str)
+			"got %d, max %d", numTx, domainmessage.MaxMassPerBlock)
+		return ruleError(ErrBlockMassTooHigh, str)
 	}
+	return nil
+}

 	// The first transaction in a block must be a coinbase.
+func (dag *BlockDAG) checkFirstBlockTransactionIsCoinbase(block *util.Block) error {
+	transactions := block.Transactions()
 	if !transactions[util.CoinbaseTransactionIndex].IsCoinBase() {
-		return 0, ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
+		return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
 			"block is not a coinbase")
 	}
+	return nil
+}

-	txOffset := util.CoinbaseTransactionIndex + 1
-
-	// A block must not have more than one coinbase. And transactions must be
-	// ordered by subnetwork
-	for i, tx := range transactions[txOffset:] {
+func (dag *BlockDAG) checkBlockContainsOnlyOneCoinbase(block *util.Block) error {
+	transactions := block.Transactions()
+	for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
 		if tx.IsCoinBase() {
 			str := fmt.Sprintf("block contains second coinbase at "+
 				"index %d", i+2)
-			return 0, ruleError(ErrMultipleCoinbases, str)
-		}
-		if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) {
-			return 0, ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
+			return ruleError(ErrMultipleCoinbases, str)
 		}
 	}
+	return nil
+}

-	// Do some preliminary checks on each transaction to ensure they are
-	// sane before continuing.
+func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error {
+	transactions := block.Transactions()
+	for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
+		if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) {
+			return ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
+		}
+	}
+	return nil
+}
+
+func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error {
+	// Disallow non-native/coinbase subnetworks in networks that don't allow them
+	if !dag.Params.EnableNonNativeSubnetworks {
+		transactions := block.Transactions()
+		for _, tx := range transactions {
+			if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
+				tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
+				return ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed")
+			}
+		}
+	}
+	return nil
+}
+
+func (dag *BlockDAG) checkBlockTransactionSanity(block *util.Block) error {
+	transactions := block.Transactions()
 	for _, tx := range transactions {
 		err := CheckTransactionSanity(tx, dag.subnetworkID)
 		if err != nil {
-			return 0, err
+			return err
 		}
 	}
+	return nil
+}

+func (dag *BlockDAG) checkBlockHashMerkleRoot(block *util.Block) error {
 	// Build merkle tree and ensure the calculated merkle root matches the
 	// entry in the block header. This also has the effect of caching all
 	// of the transaction hashes in the block to speed up future hash
 	// checks.
 	hashMerkleTree := BuildHashMerkleTreeStore(block.Transactions())
 	calculatedHashMerkleRoot := hashMerkleTree.Root()
-	if !header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
+	if !block.MsgBlock().Header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
 		str := fmt.Sprintf("block hash merkle root is invalid - block "+
 			"header indicates %s, but calculated value is %s",
-			header.HashMerkleRoot, calculatedHashMerkleRoot)
-		return 0, ruleError(ErrBadMerkleRoot, str)
+			block.MsgBlock().Header.HashMerkleRoot, calculatedHashMerkleRoot)
+		return ruleError(ErrBadMerkleRoot, str)
 	}
+	return nil
+}

 	// Check for duplicate transactions. This check will be fairly quick
 	// since the transaction IDs are already cached due to building the
 	// merkle tree above.
+func (dag *BlockDAG) checkBlockDuplicateTransactions(block *util.Block) error {
 	existingTxIDs := make(map[daghash.TxID]struct{})
+	transactions := block.Transactions()
 	for _, tx := range transactions {
 		id := tx.ID()
 		if _, exists := existingTxIDs[*id]; exists {
 			str := fmt.Sprintf("block contains duplicate "+
 				"transaction %s", id)
-			return 0, ruleError(ErrDuplicateTx, str)
+			return ruleError(ErrDuplicateTx, str)
 		}
 		existingTxIDs[*id] = struct{}{}
 	}
+	return nil
+}

-	return delay, nil
+func (dag *BlockDAG) checkBlockDoubleSpends(block *util.Block) error {
+	usedOutpoints := make(map[domainmessage.Outpoint]*daghash.TxID)
+	transactions := block.Transactions()
+	for _, tx := range transactions {
+		for _, txIn := range tx.MsgTx().TxIn {
+			if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists {
+				str := fmt.Sprintf("transaction %s spends "+
+					"outpoint %s that was already spent by "+
+					"transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID)
+				return ruleError(ErrDoubleSpendInSameBlock, str)
+			}
+			usedOutpoints[txIn.PreviousOutpoint] = tx.ID()
+		}
+	}
+	return nil
 }

 // checkBlockHeaderContext performs several validation checks on the block header
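This refactor replaces one monolithic checkBlockSanity with a helper per rule, each returning only an error, while the caller keeps the delay handling. A minimal sketch of that structure (the types and rule names are illustrative, not the real API):

```go
package main

import (
	"errors"
	"fmt"
)

// block stands in for util.Block; the real checks operate on that type.
type block struct{ txCount int }

// checkFunc mirrors the shape of the new helper methods: each enforces one
// sanity rule and returns an error on violation.
type checkFunc func(*block) error

func checkHasTransactions(b *block) error {
	if b.txCount == 0 {
		return errors.New("block does not contain any transactions")
	}
	return nil
}

// runSanityChecks chains the helpers in order and stops at the first failure,
// which is exactly how the refactored checkBlockSanity is organized.
func runSanityChecks(b *block, checks []checkFunc) error {
	for _, check := range checks {
		if err := check(b); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runSanityChecks(&block{txCount: 0}, []checkFunc{checkHasTransactions})
	fmt.Println(err) // block does not contain any transactions
}
```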
@@ -555,7 +629,7 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t
 // - BFFastAdd: No checks are performed.
 //
 // This function MUST be called with the dag state lock held (for writes).
-func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestParent *blockNode, fastAdd bool) error {
+func (dag *BlockDAG) checkBlockHeaderContext(header *domainmessage.BlockHeader, bluestParent *blockNode, fastAdd bool) error {
 	if !fastAdd {
 		if err := dag.validateDifficulty(header, bluestParent); err != nil {
 			return err
@@ -568,7 +642,7 @@ func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestPar
 	return nil
 }

-func validateMedianTime(dag *BlockDAG, header *wire.BlockHeader, bluestParent *blockNode) error {
+func validateMedianTime(dag *BlockDAG, header *domainmessage.BlockHeader, bluestParent *blockNode) error {
 	if !header.IsGenesis() {
 		// Ensure the timestamp for the block header is not before the
 		// median time of the last several blocks (medianTimeBlocks).
@@ -582,7 +656,7 @@ func validateMedianTime(dag *BlockDAG, header *wire.BlockHeader, bluestParent *b
 	return nil
 }

-func (dag *BlockDAG) validateDifficulty(header *wire.BlockHeader, bluestParent *blockNode) error {
+func (dag *BlockDAG) validateDifficulty(header *domainmessage.BlockHeader, bluestParent *blockNode) error {
 	// Ensure the difficulty specified in the block header matches
 	// the calculated difficulty based on the previous block and
 	// difficulty retarget rules.
@@ -598,11 +672,11 @@ func (dag *BlockDAG) validateDifficulty(header *wire.BlockHeader, bluestParent *
 }

 // validateParents validates that no parent is an ancestor of another parent, and no parent is finalized
-func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
+func (dag *BlockDAG) validateParents(blockHeader *domainmessage.BlockHeader, parents blockSet) error {
 	for parentA := range parents {
 		// isFinalized might be false-negative because node finality status is
 		// updated in a separate goroutine. This is why later the block is
-		// checked more thoroughly on the finality rules in dag.checkFinalityRules.
+		// checked more thoroughly on the finality rules in dag.checkFinalityViolation.
 		if parentA.isFinalized {
 			return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
 				"parent of block %s", parentA.hash, blockHeader.BlockHash()))
@@ -613,7 +687,7 @@ func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents bloc
 			continue
 		}

-		isAncestorOf, err := dag.isAncestorOf(parentA, parentB)
+		isAncestorOf, err := dag.isInPast(parentA, parentB)
 		if err != nil {
 			return err
 		}
@@ -689,9 +763,9 @@ func (dag *BlockDAG) validateAllTxsFinalized(block *util.Block, node *blockNode,
 func ensureNoDuplicateTx(utxoSet UTXOSet, transactions []*util.Tx) error {
 	// Fetch utxos for all of the transaction ouputs in this block.
 	// Typically, there will not be any utxos for any of the outputs.
-	fetchSet := make(map[wire.Outpoint]struct{})
+	fetchSet := make(map[domainmessage.Outpoint]struct{})
 	for _, tx := range transactions {
-		prevOut := wire.Outpoint{TxID: *tx.ID()}
+		prevOut := domainmessage.Outpoint{TxID: *tx.ID()}
 		for txOutIdx := range tx.MsgTx().TxOut {
 			prevOut.Index = uint32(txOutIdx)
 			fetchSet[prevOut] = struct{}{}
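ensureNoDuplicateTx collects every outpoint the block would create and asks the UTXO set whether any of them already exist. A sketch of the fetch-set construction with simplified types (the outpoint struct and the UTXO lookup are stand-ins for the real domainmessage/UTXOSet types):

```go
package main

import "fmt"

// outpoint is a simplified stand-in for domainmessage.Outpoint.
type outpoint struct {
	txID  string
	index uint32
}

// ensureNoDuplicateTx builds the set of outpoints the block's transactions
// would create and rejects the block if the UTXO view already contains one,
// which would indicate a duplicate transaction ID.
func ensureNoDuplicateTx(utxoContains func(outpoint) bool, txs map[string]int) error {
	fetchSet := make(map[outpoint]struct{})
	for txID, numOutputs := range txs {
		for i := 0; i < numOutputs; i++ {
			fetchSet[outpoint{txID: txID, index: uint32(i)}] = struct{}{}
		}
	}
	for op := range fetchSet {
		if utxoContains(op) {
			return fmt.Errorf("tried to overwrite transaction %s", op.txID)
		}
	}
	return nil
}

func main() {
	existing := map[outpoint]bool{{txID: "dead", index: 0}: true}
	err := ensureNoDuplicateTx(func(op outpoint) bool { return existing[op] }, map[string]int{"dead": 1})
	fmt.Println(err)
}
```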
@@ -798,7 +872,7 @@ func CheckTransactionInputsAndCalulateFee(tx *util.Tx, txBlueScore uint64, utxoS
 	return txFeeInSompi, nil
 }

-func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txBlueScore uint64, txIn *wire.TxIn) error {
+func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txBlueScore uint64, txIn *domainmessage.TxIn) error {
 	// Ensure the transaction is not spending coins which have not
 	// yet reached the required coinbase maturity.
 	if entry.IsCoinbase() {
@@ -838,6 +912,11 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
 		return nil, err
 	}

+	err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
+	if err != nil {
+		return nil, err
+	}
+
 	if err := validateBlockMass(pastUTXO, transactions); err != nil {
 		return nil, err
 	}
@@ -857,7 +936,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,

 	for _, tx := range transactions {
 		txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO,
-			dag.dagParams, fastAdd)
+			dag.Params, fastAdd)
 		if err != nil {
 			return nil, err
 		}
@@ -913,7 +992,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,

 	// Now that the inexpensive checks are done and have passed, verify the
 	// transactions are actually allowed to spend the coins by running the
-	// expensive ECDSA signature check scripts. Doing this last helps
+	// expensive SCHNORR signature check scripts. Doing this last helps
 	// prevent CPU exhaustion attacks.
 	err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
 	if err != nil {

@@ -5,53 +5,56 @@
 package blockdag

 import (
-	"github.com/pkg/errors"
 	"math"
 	"path/filepath"
 	"testing"
 	"time"

+	"github.com/kaspanet/kaspad/util/mstime"
+
+	"github.com/pkg/errors"
+
 	"github.com/kaspanet/kaspad/dagconfig"
+	"github.com/kaspanet/kaspad/domainmessage"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/util/daghash"
 	"github.com/kaspanet/kaspad/util/subnetworkid"
-	"github.com/kaspanet/kaspad/wire"
 )

 // TestSequenceLocksActive tests the SequenceLockActive function to ensure it
 // works as expected in all possible combinations/scenarios.
 func TestSequenceLocksActive(t *testing.T) {
-	seqLock := func(h int64, s int64) *SequenceLock {
+	seqLock := func(blueScore int64, milliseconds int64) *SequenceLock {
 		return &SequenceLock{
-			Seconds:        s,
-			BlockBlueScore: h,
+			Milliseconds:   milliseconds,
+			BlockBlueScore: blueScore,
 		}
 	}

 	tests := []struct {
 		seqLock        *SequenceLock
 		blockBlueScore uint64
-		mtp            time.Time
+		mtp            mstime.Time

 		want bool
 	}{
 		// Block based sequence lock with equal block blue score.
-		{seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: time.Unix(9, 0), want: true},
+		{seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: mstime.UnixMilliseconds(9), want: true},

 		// Time based sequence lock with mtp past the absolute time.
-		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(31, 0), want: true},
+		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(31), want: true},

 		// Block based sequence lock with current blue score below seq lock block blue score.
-		{seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: time.Unix(9, 0), want: false},
+		{seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: mstime.UnixMilliseconds(9), want: false},

 		// Time based sequence lock with current time before lock time.
-		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(29, 0), want: false},
+		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(29), want: false},

 		// Block based sequence lock at the same blue score, so shouldn't yet be active.
-		{seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: time.Unix(9, 0), want: false},
+		{seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: mstime.UnixMilliseconds(9), want: false},

 		// Time based sequence lock with current time equal to lock time, so shouldn't yet be active.
-		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(30, 0), want: false},
+		{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(30), want: false},
 	}

 	t.Logf("Running %d sequence locks tests", len(tests))
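The switch from time.Time to mstime.Time pins consensus timestamps to millisecond precision, which is why the fixtures now use UnixMilliseconds. A rough sketch of what such a wrapper provides (the real util/mstime API may differ in names and methods):

```go
package main

import (
	"fmt"
	"time"
)

// msTime is a clock value truncated to millisecond precision, so consensus
// code never carries stray nanoseconds.
type msTime struct{ t time.Time }

func unixMilliseconds(ms int64) msTime {
	return msTime{time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))}
}

func now() msTime {
	return msTime{time.Now().Truncate(time.Millisecond)}
}

func (m msTime) After(other msTime) bool { return m.t.After(other.t) }

func (m msTime) UnixMilliseconds() int64 { return m.t.UnixNano() / int64(time.Millisecond) }

func main() {
	lock := unixMilliseconds(30)
	mtp := unixMilliseconds(31)
	fmt.Println(mtp.After(lock), now().UnixMilliseconds() > 0) // true true
}
```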
@@ -69,7 +72,7 @@ func TestSequenceLocksActive(t *testing.T) {
 // ensure it fails.
 func TestCheckConnectBlockTemplate(t *testing.T) {
 	// Create a new database and DAG instance to run tests against.
-	dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
+	dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
 		DAGParams: &dagconfig.SimnetParams,
 	})
 	if err != nil {
@@ -124,12 +127,6 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
 		"block 4: %v", err)
 	}

-	blockNode3 := dag.index.LookupNode(blocks[3].Hash())
-	blockNode4 := dag.index.LookupNode(blocks[4].Hash())
-	if blockNode3.children.contains(blockNode4) {
-		t.Errorf("Block 4 wasn't successfully detached as a child from block3")
-	}
-
 	// Block 3a should connect even though it does not build on dag tips.
 	err = dag.CheckConnectBlockTemplateNoLock(blocks[5])
 	if err != nil {
@@ -161,7 +158,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
 // as expected.
 func TestCheckBlockSanity(t *testing.T) {
 	// Create a new database and dag instance to run tests against.
-	dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
+	dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
 		DAGParams: &dagconfig.SimnetParams,
 	})
 	if err != nil {
@@ -169,6 +166,7 @@ func TestCheckBlockSanity(t *testing.T) {
 		return
 	}
 	defer teardownFunc()
+	dag.timeSource = newFakeTimeSource(mstime.Now())

 	block := util.NewBlock(&Block100000)
 	if len(block.Transactions()) < 3 {
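Injecting a fake time source keeps timestamp-sensitive assertions deterministic. A minimal sketch of the pattern; the interface and constructor names here are illustrative rather than the real blockdag API:

```go
package main

import (
	"fmt"
	"time"
)

// timeSource mirrors the shape of the DAG's time source dependency.
type timeSource interface {
	Now() time.Time
}

// fakeTimeSource pins "now" to a fixed instant so tests that depend on block
// timestamps always see the same clock.
type fakeTimeSource struct{ fixed time.Time }

func (f fakeTimeSource) Now() time.Time { return f.fixed }

func validateNotTooFarInFuture(ts timeSource, blockTime time.Time, tolerance time.Duration) error {
	if blockTime.After(ts.Now().Add(tolerance)) {
		return fmt.Errorf("block timestamp %s is too far in the future", blockTime)
	}
	return nil
}

func main() {
	now := time.Unix(1_600_000_000, 0)
	src := fakeTimeSource{fixed: now}
	fmt.Println(validateNotTooFarInFuture(src, now.Add(time.Hour), 10*time.Minute))
}
```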
@@ -191,26 +189,15 @@ func TestCheckBlockSanity(t *testing.T) {
 	if !errors.As(err, &ruleErr) {
 		t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
 	} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
-		t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
+		t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
+			" %v, err %s", ruleErr.ErrorCode, err)
 	}
 	if delay != 0 {
 		t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
 	}

-	// Ensure a block that has a timestamp with a precision higher than one
-	// second fails.
-	timestamp := block.MsgBlock().Header.Timestamp
-	block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond)
-	delay, err = dag.checkBlockSanity(block, BFNone)
-	if err == nil {
-		t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
-	}
-	if delay != 0 {
-		t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
-	}
-
-var invalidParentsOrderBlock = wire.MsgBlock{
-	Header: wire.BlockHeader{
+var invalidParentsOrderBlock = domainmessage.MsgBlock{
+	Header: domainmessage.BlockHeader{
 		Version: 0x10000000,
 		ParentHashes: []*daghash.Hash{
 			{
@@ -244,16 +231,16 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25,
|
||||
0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2,
|
||||
},
|
||||
Timestamp: time.Unix(0x5cd18053, 0),
|
||||
Timestamp: mstime.UnixMilliseconds(0x5cd18053000),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 0x1,
|
||||
},
|
||||
Transactions: []*wire.MsgTx{
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
@@ -265,7 +252,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -278,9 +265,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -315,7 +302,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -348,9 +335,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -384,7 +371,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -417,9 +404,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -454,7 +441,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -492,8 +479,8 @@ func TestCheckBlockSanity(t *testing.T) {

 	blockInTheFuture := Block100000
 	expectedDelay := 10 * time.Second
-	now := time.Unix(time.Now().Unix(), 0)
-	blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
+	deviationTolerance := time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock
+	blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
 	delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
 	if err != nil {
 		t.Errorf("CheckBlockSanity: %v", err)
@@ -559,7 +546,7 @@ func TestPastMedianTime(t *testing.T) {
|
||||
|
||||
func TestValidateParents(t *testing.T) {
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -568,15 +555,15 @@ func TestValidateParents(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
a := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
|
||||
b := prepareAndProcessBlock(t, dag, a)
|
||||
c := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
|
||||
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
|
||||
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
|
||||
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
|
||||
|
||||
aNode := nodeByMsgBlock(t, dag, a)
|
||||
bNode := nodeByMsgBlock(t, dag, b)
|
||||
cNode := nodeByMsgBlock(t, dag, c)
|
||||
|
||||
fakeBlockHeader := &wire.BlockHeader{
|
||||
fakeBlockHeader := &domainmessage.BlockHeader{
|
||||
HashMerkleRoot: &daghash.ZeroHash,
|
||||
AcceptedIDMerkleRoot: &daghash.ZeroHash,
|
||||
UTXOCommitment: &daghash.ZeroHash,
|
||||
@@ -609,13 +596,12 @@ func TestCheckTransactionSanity(t *testing.T) {
|
||||
outputValue uint64
|
||||
nodeSubnetworkID subnetworkid.SubnetworkID
|
||||
txSubnetworkData *txSubnetworkData
|
||||
extraModificationsFunc func(*wire.MsgTx)
|
||||
extraModificationsFunc func(*domainmessage.MsgTx)
|
||||
expectedErr error
|
||||
}{
|
||||
{"good one", 1, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
|
||||
{"no inputs", 0, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrNoTxInputs, "")},
|
||||
{"no outputs", 1, 0, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
|
||||
{"too massive", 1, 1000000, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrTxMassTooHigh, "")},
|
||||
{"too much sompi in one output", 1, 1, util.MaxSompi + 1,
|
||||
*subnetworkid.SubnetworkIDNative,
|
||||
nil,
|
||||
@@ -629,7 +615,7 @@ func TestCheckTransactionSanity(t *testing.T) {
|
||||
{"duplicate inputs", 2, 1, 1,
|
||||
*subnetworkid.SubnetworkIDNative,
|
||||
nil,
|
||||
func(tx *wire.MsgTx) { tx.TxIn[1].PreviousOutpoint.Index = 0 },
|
||||
func(tx *domainmessage.MsgTx) { tx.TxIn[1].PreviousOutpoint.Index = 0 },
|
||||
ruleError(ErrDuplicateTxInputs, "")},
|
||||
{"1 input coinbase",
|
||||
1,
|
||||
@@ -683,14 +669,14 @@ func TestCheckTransactionSanity(t *testing.T) {
|
||||
{"invalid payload hash", 1, 1, 0,
|
||||
subnetworkid.SubnetworkID{123},
|
||||
&txSubnetworkData{&subnetworkid.SubnetworkID{123}, 0, []byte{1}},
|
||||
func(tx *wire.MsgTx) {
|
||||
func(tx *domainmessage.MsgTx) {
|
||||
tx.PayloadHash = &daghash.Hash{}
|
||||
},
|
||||
ruleError(ErrInvalidPayloadHash, "")},
|
||||
{"invalid payload hash in native subnetwork", 1, 1, 0,
|
||||
*subnetworkid.SubnetworkIDNative,
|
||||
nil,
|
||||
func(tx *wire.MsgTx) {
|
||||
func(tx *domainmessage.MsgTx) {
|
||||
tx.PayloadHash = daghash.DoubleHashP(tx.Payload)
|
||||
},
|
||||
ruleError(ErrInvalidPayloadHash, "")},
|
||||
@@ -713,8 +699,8 @@ func TestCheckTransactionSanity(t *testing.T) {
|
||||
|
||||
// Block100000 defines block 100,000 of the block DAG. It is used to
|
||||
// test Block operations.
|
||||
var Block100000 = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
var Block100000 = domainmessage.MsgBlock{
|
||||
Header: domainmessage.BlockHeader{
|
||||
Version: 0x10000000,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
@@ -743,16 +729,16 @@ var Block100000 = wire.MsgBlock{
|
||||
0x3c, 0xb1, 0x16, 0x8f, 0x5f, 0x6b, 0x45, 0x87,
|
||||
},
|
||||
UTXOCommitment: &daghash.ZeroHash,
|
||||
Timestamp: time.Unix(0x5cdac4b1, 0),
|
||||
Timestamp: mstime.UnixMilliseconds(0x17305aa654a),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 0x00000001,
|
||||
Nonce: 1,
|
||||
},
|
||||
Transactions: []*wire.MsgTx{
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50,
|
||||
0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33,
|
||||
@@ -765,7 +751,7 @@ var Block100000 = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -787,9 +773,9 @@ var Block100000 = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
|
||||
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
|
||||
@@ -801,7 +787,7 @@ var Block100000 = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
|
||||
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
|
||||
@@ -817,9 +803,9 @@ var Block100000 = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -854,7 +840,7 @@ var Block100000 = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -887,9 +873,9 @@ var Block100000 = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -923,7 +909,7 @@ var Block100000 = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -956,9 +942,9 @@ var Block100000 = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -993,7 +979,7 @@ var Block100000 = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1015,8 +1001,8 @@ var Block100000 = wire.MsgBlock{
|
||||
}
|
||||
|
||||
// BlockWithWrongTxOrder defines invalid block 100,000 of the block DAG.
|
||||
var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Header: domainmessage.BlockHeader{
|
||||
Version: 1,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
@@ -1050,16 +1036,16 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90,
|
||||
0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e,
|
||||
},
|
||||
Timestamp: time.Unix(0x5cd16eaa, 0),
|
||||
Timestamp: mstime.UnixMilliseconds(0x5cd16eaa000),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 0x0,
|
||||
Nonce: 1,
|
||||
},
|
||||
Transactions: []*wire.MsgTx{
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50,
|
||||
0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33,
|
||||
@@ -1072,7 +1058,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1094,9 +1080,9 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
|
||||
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
|
||||
@@ -1108,7 +1094,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
|
||||
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
|
||||
@@ -1124,9 +1110,9 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -1161,7 +1147,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1196,9 +1182,9 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -1232,7 +1218,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1265,9 +1251,9 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -1302,7 +1288,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
|
||||
@@ -78,7 +78,7 @@ func (c bitConditionChecker) EndTime() uint64 {
|
||||
//
|
||||
// This is part of the thresholdConditionChecker interface implementation.
|
||||
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
|
||||
return c.dag.dagParams.RuleChangeActivationThreshold
|
||||
return c.dag.Params.RuleChangeActivationThreshold
|
||||
}
|
||||
|
||||
// MinerConfirmationWindow is the number of blocks in each threshold state
|
||||
@@ -89,7 +89,7 @@ func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
|
||||
//
|
||||
// This is part of the thresholdConditionChecker interface implementation.
|
||||
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
|
||||
return c.dag.dagParams.MinerConfirmationWindow
|
||||
return c.dag.Params.MinerConfirmationWindow
|
||||
}
|
||||
|
||||
// Condition returns true when the specific bit associated with the checker is
|
||||
@@ -159,7 +159,7 @@ func (c deploymentChecker) EndTime() uint64 {
|
||||
//
|
||||
// This is part of the thresholdConditionChecker interface implementation.
|
||||
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
|
||||
return c.dag.dagParams.RuleChangeActivationThreshold
|
||||
return c.dag.Params.RuleChangeActivationThreshold
|
||||
}
|
||||
|
||||
// MinerConfirmationWindow is the number of blocks in each threshold state
|
||||
@@ -170,7 +170,7 @@ func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
|
||||
//
|
||||
// This is part of the thresholdConditionChecker interface implementation.
|
||||
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
|
||||
return c.dag.dagParams.MinerConfirmationWindow
|
||||
return c.dag.Params.MinerConfirmationWindow
|
||||
}
|
||||
|
||||
// Condition returns true when the specific bit defined by the deployment
|
||||
@@ -198,8 +198,8 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
 	// that is either in the process of being voted on, or locked in for the
 	// activation at the next threshold window change.
 	expectedVersion := uint32(vbTopBits)
-	for id := 0; id < len(dag.dagParams.Deployments); id++ {
-		deployment := &dag.dagParams.Deployments[id]
+	for id := 0; id < len(dag.Params.Deployments); id++ {
+		deployment := &dag.Params.Deployments[id]
 		cache := &dag.deploymentCaches[id]
 		checker := deploymentChecker{deployment: deployment, dag: dag}
 		state, err := dag.thresholdState(prevNode, checker, cache)
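calcNextBlockVersion folds every deployment that is currently being voted on or locked in into the version-bits field. A compact sketch of that loop with stand-in types (the state names follow the usual BIP9-style convention and are assumptions here):

```go
package main

import "fmt"

// deployment and thresholdState are simplified stand-ins for the consensus
// deployment machinery referenced above.
type deployment struct {
	bitNumber uint8
}

type thresholdState int

const (
	thresholdDefined thresholdState = iota
	thresholdStarted
	thresholdLockedIn
	thresholdActive
)

const vbTopBits = 0x20000000

// calcNextBlockVersion sets the bit of every deployment that is started or
// locked in, on top of the version-bits top bits.
func calcNextBlockVersion(deployments []deployment, stateOf func(deployment) thresholdState) int32 {
	expectedVersion := uint32(vbTopBits)
	for _, d := range deployments {
		state := stateOf(d)
		if state == thresholdStarted || state == thresholdLockedIn {
			expectedVersion |= 1 << d.bitNumber
		}
	}
	return int32(expectedVersion)
}

func main() {
	version := calcNextBlockVersion(
		[]deployment{{bitNumber: 1}},
		func(deployment) thresholdState { return thresholdStarted },
	)
	fmt.Printf("next block version: %#x\n", version)
}
```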
@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -222,7 +222,7 @@ func TestChainUpdates(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -5,12 +5,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/limits"
|
||||
"github.com/kaspanet/kaspad/logs"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
@@ -24,42 +21,9 @@ const (
|
||||
var (
|
||||
cfg *ConfigFlags
|
||||
log *logs.Logger
|
||||
spawn func(func())
|
||||
spawn func(string, func())
|
||||
)
|
||||
|
||||
// loadBlockDB opens the block database and returns a handle to it.
|
||||
func loadBlockDB() (database.DB, error) {
|
||||
// The database name is based on the database type.
|
||||
dbName := blockDBNamePrefix + "_" + cfg.DBType
|
||||
dbPath := filepath.Join(cfg.DataDir, dbName)
|
||||
|
||||
log.Infof("Loading block database from '%s'", dbPath)
|
||||
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
|
||||
if err != nil {
|
||||
// Return the error if it's not because the database doesn't
|
||||
// exist.
|
||||
var dbErr database.Error
|
||||
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
|
||||
database.ErrDbDoesNotExist {
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the db if it does not exist.
|
||||
err = os.MkdirAll(cfg.DataDir, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Block database loaded")
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// realMain is the real main function for the utility. It is necessary to work
|
||||
// around the fact that deferred functions do not run when os.Exit() is called.
|
||||
func realMain() error {
|
||||
@@ -76,14 +40,6 @@ func realMain() error {
|
||||
log = backendLogger.Logger("MAIN")
|
||||
spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
// Load the block database.
|
||||
db, err := loadBlockDB()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to load database: %s", err)
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
fi, err := os.Open(cfg.InFile)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
|
||||
@@ -94,7 +50,7 @@ func realMain() error {
|
||||
// Create a block importer for the database and input file and start it.
|
||||
// The done channel returned from start will contain an error if
|
||||
// anything went wrong.
|
||||
importer, err := newBlockImporter(db, fi)
|
||||
importer, err := newBlockImporter(fi)
|
||||
if err != nil {
|
||||
log.Errorf("Failed create block importer: %s", err)
|
||||
return err
|
||||
|
||||
@@ -6,20 +6,15 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDBType = "ffldb"
|
||||
defaultDataFile = "bootstrap.dat"
|
||||
defaultProgress = 10
|
||||
)
|
||||
@@ -27,7 +22,6 @@ const (
|
||||
var (
|
||||
kaspadHomeDir = util.AppDataDir("kaspad", false)
|
||||
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
activeConfig *ConfigFlags
|
||||
)
|
||||
|
||||
@@ -41,7 +35,6 @@ func ActiveConfig() *ConfigFlags {
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type ConfigFlags struct {
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
||||
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
|
||||
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
|
||||
@@ -58,23 +51,11 @@ func fileExists(name string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// loadConfig initializes and parses the config using command line options.
|
||||
func loadConfig() (*ConfigFlags, []string, error) {
|
||||
// Default config.
|
||||
activeConfig = &ConfigFlags{
|
||||
DataDir: defaultDataDir,
|
||||
DBType: defaultDBType,
|
||||
InFile: defaultDataFile,
|
||||
Progress: defaultProgress,
|
||||
}
|
||||
@@ -95,16 +76,6 @@ func loadConfig() (*ConfigFlags, []string, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Validate database type.
|
||||
if !validDbType(activeConfig.DBType) {
|
||||
str := "%s: The specified database type [%s] is invalid -- " +
|
||||
"supported types %s"
|
||||
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
parser.WriteHelp(os.Stderr)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Append the network type to the data directory so it is "namespaced"
|
||||
// per network. In addition to the block database, there are other
|
||||
// pieces of data that are saved to disk such as address manager state.
|
||||
|
||||
@@ -7,15 +7,15 @@ package main
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// importResults houses the stats and result as an import operation.
|
||||
@@ -28,7 +28,6 @@ type importResults struct {
|
||||
// blockImporter houses information about an ongoing import from a block data
|
||||
// file to the block database.
|
||||
type blockImporter struct {
|
||||
db database.DB
|
||||
dag *blockdag.BlockDAG
|
||||
r io.ReadSeeker
|
||||
processQueue chan []byte
|
||||
@@ -41,8 +40,8 @@ type blockImporter struct {
|
||||
receivedLogBlocks int64
|
||||
receivedLogTx int64
|
||||
lastHeight int64
|
||||
lastBlockTime time.Time
|
||||
lastLogTime time.Time
|
||||
lastBlockTime mstime.Time
|
||||
lastLogTime mstime.Time
|
||||
}
|
||||
|
||||
// readBlock reads the next block from the input file.
|
||||
@@ -69,10 +68,10 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
|
||||
if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blockLen > wire.MaxMessagePayload {
|
||||
if blockLen > domainmessage.MaxMessagePayload {
|
||||
return nil, errors.Errorf("block payload of %d bytes is larger "+
|
||||
"than the max allowed %d bytes", blockLen,
|
||||
wire.MaxMessagePayload)
|
||||
domainmessage.MaxMessagePayload)
|
||||
}
|
||||
|
||||
serializedBlock := make([]byte, blockLen)
|
||||
@@ -172,7 +171,7 @@ out:
|
||||
func (bi *blockImporter) logProgress() {
|
||||
bi.receivedLogBlocks++
|
||||
|
||||
now := time.Now()
|
||||
now := mstime.Now()
|
||||
duration := now.Sub(bi.lastLogTime)
|
||||
if duration < time.Second*time.Duration(cfg.Progress) {
|
||||
return
|
||||
@@ -266,12 +265,12 @@ func (bi *blockImporter) Import() chan *importResults {
 	// Start up the read and process handling goroutines. This setup allows
 	// blocks to be read from disk in parallel while being processed.
 	bi.wg.Add(2)
-	spawn(bi.readHandler)
-	spawn(bi.processHandler)
+	spawn("blockImporter.readHandler", bi.readHandler)
+	spawn("blockImporter.processHandler", bi.processHandler)

 	// Wait for the import to finish in a separate goroutine and signal
 	// the status handler when done.
-	spawn(func() {
+	spawn("blockImporter.sendToDoneChan", func() {
 		bi.wg.Wait()
 		bi.doneChan <- true
 	})
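The spawn helper now takes a name for each goroutine, which makes panic reports and goroutine dumps attributable to a specific call site. A hedged sketch of such a wrapper; the real panics.GoroutineWrapperFunc implementation may differ:

```go
package main

import (
	"fmt"
	"sync"
)

// spawn runs f on a new goroutine and recovers panics, logging them together
// with the caller-supplied name.
func spawn(name string, f func()) {
	go func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Printf("goroutine %q panicked: %v\n", name, r)
			}
		}()
		f()
	}()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	spawn("example.worker", func() {
		defer wg.Done()
		fmt.Println("worker running")
	})
	wg.Wait()
}
```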
@@ -279,7 +278,7 @@ func (bi *blockImporter) Import() chan *importResults {
|
||||
// Start the status handler and return the result channel that it will
|
||||
// send the results on when the import is done.
|
||||
resultChan := make(chan *importResults)
|
||||
spawn(func() {
|
||||
spawn("blockImporter.statusHandler", func() {
|
||||
bi.statusHandler(resultChan)
|
||||
})
|
||||
return resultChan
|
||||
@@ -287,7 +286,7 @@ func (bi *blockImporter) Import() chan *importResults {
|
||||
|
||||
// newBlockImporter returns a new importer for the provided file reader seeker
|
||||
// and database.
|
||||
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
|
||||
// Create the acceptance index if needed.
|
||||
var indexes []indexers.Indexer
|
||||
if cfg.AcceptanceIndex {
|
||||
@@ -302,9 +301,8 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
DB: db,
|
||||
DAGParams: ActiveConfig().NetParams(),
|
||||
TimeSource: blockdag.NewMedianTime(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
IndexManager: indexManager,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -312,13 +310,12 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
return &blockImporter{
|
||||
db: db,
|
||||
r: r,
|
||||
processQueue: make(chan []byte, 2),
|
||||
doneChan: make(chan bool),
|
||||
errChan: make(chan error),
|
||||
quit: make(chan struct{}),
|
||||
dag: dag,
|
||||
lastLogTime: time.Now(),
|
||||
lastLogTime: mstime.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/rpcmodel"
|
||||
"github.com/kaspanet/kaspad/rpc/model"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
@@ -24,7 +24,7 @@ const (
|
||||
// unusableFlags are the command usage flags which this utility are not
|
||||
// able to use. In particular it doesn't support websockets and
|
||||
// consequently notifications.
|
||||
unusableFlags = rpcmodel.UFWebsocketOnly | rpcmodel.UFNotification
|
||||
unusableFlags = model.UFWebsocketOnly | model.UFNotification
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -45,10 +45,10 @@ func listCommands() {
|
||||
)
|
||||
|
||||
// Get a list of registered commands and categorize and filter them.
|
||||
cmdMethods := rpcmodel.RegisteredCmdMethods()
|
||||
cmdMethods := model.RegisteredCmdMethods()
|
||||
categorized := make([][]string, numCategories)
|
||||
for _, method := range cmdMethods {
|
||||
flags, err := rpcmodel.MethodUsageFlags(method)
|
||||
flags, err := model.MethodUsageFlags(method)
|
||||
if err != nil {
|
||||
// This should never happen since the method was just
|
||||
// returned from the package, but be safe.
|
||||
@@ -60,7 +60,7 @@ func listCommands() {
|
||||
continue
|
||||
}
|
||||
|
||||
usage, err := rpcmodel.MethodUsageText(method)
|
||||
usage, err := model.MethodUsageText(method)
|
||||
if err != nil {
|
||||
// This should never happen since the method was just
|
||||
// returned from the package, but be safe.
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/btcsuite/go-socks/socks"
|
||||
"github.com/kaspanet/kaspad/rpcmodel"
|
||||
"github.com/kaspanet/kaspad/rpc/model"
|
||||
)
|
||||
|
||||
// newHTTPClient returns a new HTTP client that is configured according to the
|
||||
@@ -117,7 +117,7 @@ func sendPostRequest(marshalledJSON []byte, cfg *ConfigFlags) ([]byte, error) {
|
||||
}
|
||||
|
||||
// Unmarshal the response.
|
||||
var resp rpcmodel.Response
|
||||
var resp model.Response
|
||||
if err := json.Unmarshal(respBytes, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kaspanet/kaspad/rpcmodel"
|
||||
"github.com/kaspanet/kaspad/rpc/model"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -21,7 +21,7 @@ const (
|
||||
|
||||
// commandUsage display the usage for a specific command.
|
||||
func commandUsage(method string) {
|
||||
usage, err := rpcmodel.MethodUsageText(method)
|
||||
usage, err := model.MethodUsageText(method)
|
||||
if err != nil {
|
||||
// This should never happen since the method was already checked
|
||||
// before calling this function, but be safe.
|
||||
@@ -60,7 +60,7 @@ func main() {
|
||||
// Ensure the specified method identifies a valid registered command and
|
||||
// is one of the usable types.
|
||||
method := args[0]
|
||||
usageFlags, err := rpcmodel.MethodUsageFlags(method)
|
||||
usageFlags, err := model.MethodUsageFlags(method)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method)
|
||||
fmt.Fprintln(os.Stderr, listCmdMessage)
|
||||
@@ -105,13 +105,13 @@ func main() {
|
||||
|
||||
// Attempt to create the appropriate command using the arguments
|
||||
// provided by the user.
|
||||
cmd, err := rpcmodel.NewCommand(method, params...)
|
||||
cmd, err := model.NewCommand(method, params...)
|
||||
if err != nil {
|
||||
// Show the error along with its error code when it's a
|
||||
// rpcmodel.Error as it reallistcally will always be since the
|
||||
// model.Error as it reallistcally will always be since the
|
||||
// NewCommand function is only supposed to return errors of that
|
||||
// type.
|
||||
var rpcModelErr rpcmodel.Error
|
||||
var rpcModelErr model.Error
|
||||
if ok := errors.As(err, &rpcModelErr); ok {
|
||||
fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n",
|
||||
method, err, rpcModelErr.ErrorCode)
|
||||
@@ -119,7 +119,7 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// The error is not a rpcmodel.Error and this really should not
|
||||
// The error is not a model.Error and this really should not
|
||||
// happen. Nevertheless, fallback to just showing the error
|
||||
// if it should happen due to a bug in the package.
|
||||
fmt.Fprintf(os.Stderr, "%s error: %s\n", method, err)
|
||||
@@ -129,7 +129,7 @@ func main() {
|
||||
|
||||
// Marshal the command into a JSON-RPC byte slice in preparation for
|
||||
// sending it to the RPC server.
|
||||
marshalledJSON, err := rpcmodel.MarshalCommand(1, cmd)
|
||||
marshalledJSON, err := model.MarshalCommand(1, cmd)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
|
||||
@@ -1,39 +1,39 @@
 package main

 import (
-	"github.com/kaspanet/kaspad/rpcclient"
+	"github.com/kaspanet/kaspad/domainmessage"
+	"github.com/kaspanet/kaspad/rpc/client"
 	"github.com/kaspanet/kaspad/util"
-	"github.com/kaspanet/kaspad/wire"
 	"github.com/pkg/errors"
 	"io/ioutil"
 	"time"
 )

 type minerClient struct {
-	*rpcclient.Client
+	*client.Client
 	onBlockAdded chan struct{}
 }

-func newMinerClient(connCfg *rpcclient.ConnConfig) (*minerClient, error) {
-	client := &minerClient{
+func newMinerClient(connCfg *client.ConnConfig) (*minerClient, error) {
+	minerClient := &minerClient{
 		onBlockAdded: make(chan struct{}, 1),
 	}
-	notificationHandlers := &rpcclient.NotificationHandlers{
-		OnFilteredBlockAdded: func(_ uint64, header *wire.BlockHeader,
+	notificationHandlers := &client.NotificationHandlers{
+		OnFilteredBlockAdded: func(_ uint64, header *domainmessage.BlockHeader,
 			txs []*util.Tx) {
-			client.onBlockAdded <- struct{}{}
+			minerClient.onBlockAdded <- struct{}{}
 		},
 	}
 	var err error
-	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
+	minerClient.Client, err = client.New(connCfg, notificationHandlers)
 	if err != nil {
 		return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
 	}

-	if err = client.NotifyBlocks(); err != nil {
-		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
+	if err = minerClient.NotifyBlocks(); err != nil {
+		return nil, errors.Wrapf(err, "error while registering minerClient %s for block notifications", minerClient.Host())
 	}
-	return client, nil
+	return minerClient, nil
 }

 func connectToServer(cfg *configFlags) (*minerClient, error) {
@@ -47,7 +47,7 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connCfg := &rpcclient.ConnConfig{
|
||||
connCfg := &client.ConnConfig{
|
||||
Host: rpcAddr,
|
||||
Endpoint: "ws",
|
||||
User: cfg.RPCUser,
|
||||
|
||||
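The change above moves the miner onto the rpc/client package and names the wrapper variable minerClient, so the OnFilteredBlockAdded callback no longer shadows the client package name. A minimal sketch of how this wrapper might be constructed and used, assuming the identifiers shown in the diff; the host, the credentials, and the Pass field name are assumptions made for illustration only:

package main

import (
    "log"

    "github.com/kaspanet/kaspad/rpc/client"
)

func example() {
    connCfg := &client.ConnConfig{
        Host:     "localhost:16110", // placeholder address
        Endpoint: "ws",
        User:     "user", // placeholder credentials; Pass is an assumed field name
        Pass:     "pass",
    }

    minerClient, err := newMinerClient(connCfg)
    if err != nil {
        log.Fatalf("failed to connect: %s", err)
    }
    defer minerClient.Disconnect()

    // Each block-added notification only pokes the channel; the mining loop
    // uses it as a hint to request a fresh block template.
    <-minerClient.onBlockAdded
    log.Println("a new block was added to the DAG")
}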
@@ -2,11 +2,13 @@ package main

import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strconv"
"strings"

"github.com/kaspanet/kaspad/config"

"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"

@@ -28,15 +30,18 @@ var (
)

type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}

@@ -72,12 +77,19 @@ func parseConfig() (*configFlags, error) {
}

if cfg.RPCCert == "" && !cfg.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
return nil, errors.New("either --notls or --rpccert must be specified")
}
if cfg.RPCCert != "" && cfg.DisableTLS {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}

if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, errors.New("The profile port must be between 1024 and 65535")
}
}

initLog(defaultLogFile, defaultErrLogFile)

return cfg, nil
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

@@ -20,7 +20,7 @@ WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .

# --- multistage docker build: stage #2: runtime image
FROM alpine
@@ -3,7 +3,7 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpc/client"
"github.com/kaspanet/kaspad/util/panics"
"os"
)
@@ -28,5 +28,5 @@ func initLog(logFile, errLogFile string) {
}

func enableRPCLogging() {
rpcclient.UseLogger(backendLog, logs.LevelTrace)
client.UseLogger(backendLog, logs.LevelTrace)
}
@@ -2,17 +2,22 @@ package main

import (
"fmt"
"github.com/kaspanet/kaspad/version"
"github.com/kaspanet/kaspad/util"
"os"

"github.com/kaspanet/kaspad/version"

"github.com/pkg/errors"

_ "net/http/pprof"

"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
)

func main() {
defer panics.HandlePanic(log, nil)
defer panics.HandlePanic(log, "MAIN", nil)
interrupt := signal.InterruptListener()

cfg, err := parseConfig()
@@ -28,17 +33,27 @@ func main() {
enableRPCLogging()
}

// Enable http profiling server if requested.
if cfg.Profile != "" {
profiling.Start(cfg.Profile, log)
}

client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))
panic(errors.Wrap(err, "error connecting to the RPC server"))
}
defer client.Disconnect()

miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
if err != nil {
panic(errors.Wrap(err, "error decoding mining address"))
}

doneChan := make(chan struct{})
spawn(func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay)
spawn("mineLoop", func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced, miningAddr)
if err != nil {
panic(errors.Errorf("Error in mine loop: %s", err))
panic(errors.Wrap(err, "error in mine loop"))
}
doneChan <- struct{}{}
})
@@ -1,23 +1,17 @@
package main

import (
"encoding/hex"
nativeerrors "errors"
"math/rand"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"

"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcmodel"
clientpkg "github.com/kaspanet/kaspad/rpc/client"
"github.com/kaspanet/kaspad/rpc/model"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)

var random = rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -25,21 +19,23 @@ var hashesTried uint64

const logHashRateInterval = 10 * time.Second

func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool,
miningAddr util.Address) error {

errChan := make(chan error)

templateStopChan := make(chan struct{})

doneChan := make(chan struct{})
spawn(func() {
spawn("mineLoop-internalLoop", func() {
wg := sync.WaitGroup{}
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *util.Block)
mineNextBlock(client, foundBlock, templateStopChan, errChan)
mineNextBlock(client, miningAddr, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
block := <-foundBlock
templateStopChan <- struct{}{}
wg.Add(1)
spawn(func() {
spawn("mineLoop-handleFoundBlock", func() {
if blockDelay != 0 {
time.Sleep(time.Duration(blockDelay) * time.Millisecond)
}
@@ -65,7 +61,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
}

func logHashRate() {
spawn(func() {
spawn("logHashRate", func() {
lastCheck := time.Now()
for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried
@@ -80,77 +76,33 @@ func logHashRate() {
})
}

func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
spawn(func() {
templatesLoop(client, newTemplateChan, errChan, templateStopChan)
func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool,
templateStopChan chan struct{}, errChan chan error) {

newTemplateChan := make(chan *model.GetBlockTemplateResult)
spawn("templatesLoop", func() {
templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan)
})
spawn(func() {
solveLoop(newTemplateChan, foundBlock, errChan)
spawn("solveLoop", func() {
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan)
})
}

func handleFoundBlock(client *minerClient, block *util.Block) error {
log.Infof("Found block %s with parents %s. Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host())

err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{})
err := client.SubmitBlock(block, &model.SubmitBlockOptions{})
if err != nil {
return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err)
}
return nil
}

func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
// parse parent hashes
parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
for i, parentHash := range template.ParentHashes {
hash, err := daghash.NewHashFromStr(parentHash)
if err != nil {
return nil, errors.Errorf("Error decoding hash %s: %s", parentHash, err)
}
parentHashes[i] = hash
}

// parse Bits
bitsUint64, err := strconv.ParseUint(template.Bits, 16, 32)
if err != nil {
return nil, errors.Errorf("Error decoding bits %s: %s", template.Bits, err)
}
bits := uint32(bitsUint64)

// parseAcceptedIDMerkleRoot
acceptedIDMerkleRoot, err := daghash.NewHashFromStr(template.AcceptedIDMerkleRoot)
if err != nil {
return nil, errors.Errorf("Error parsing acceptedIDMerkleRoot: %s", err)
}
utxoCommitment, err := daghash.NewHashFromStr(template.UTXOCommitment)
if err != nil {
return nil, errors.Errorf("Error parsing utxoCommitment: %s", err)
}
// parse rest of block
msgBlock := wire.NewMsgBlock(
wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
acceptedIDMerkleRoot, utxoCommitment, bits, 0))

for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) {
reader := hex.NewDecoder(strings.NewReader(txResult.Data))
tx := &wire.MsgTx{}
if err := tx.KaspaDecode(reader, 0); err != nil {
return nil, errors.Errorf("Error decoding tx #%d: %s", i, err)
}
msgBlock.AddTransaction(tx)
}

block := util.NewBlock(msgBlock)
msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
return block, nil
}

func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) {
msgBlock := block.MsgBlock()
targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)
initialNonce := random.Uint64()
for i := random.Uint64(); i != initialNonce-1; i++ {
for i := initialNonce; i != initialNonce-1; i++ {
select {
case <-stopChan:
return
@@ -167,7 +119,9 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util

}
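One small but meaningful fix in solveBlock above: the loop now starts from the same initialNonce it later compares against, so the stop condition is only reached after nearly the whole uint64 nonce space has been swept once (wrapping around on overflow), instead of starting from a second, unrelated random value. A self-contained sketch of that iteration pattern, using only the standard library and a tiny range in place of the full 2^64 sweep:

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    initialNonce := rand.Uint64()

    count := 0
    // Starting at initialNonce and stopping just before a fixed offset visits
    // each value exactly once, relying on uint64 wraparound at the maximum.
    // The miner uses initialNonce-1 as the stop value to cover the full range.
    for i := initialNonce; i != initialNonce+5; i++ {
        count++
    }
    fmt.Println("visited", count, "nonces") // visited 5 nonces
}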
func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
func templatesLoop(client *minerClient, miningAddr util.Address,
newTemplateChan chan *model.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {

longPollID := ""
getBlockTemplateLongPoll := func() {
if longPollID != "" {
@@ -175,8 +129,8 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT
} else {
log.Infof("Requesting template without longPollID from %s", client.Host())
}
template, err := getBlockTemplate(client, longPollID)
if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) {
template, err := getBlockTemplate(client, miningAddr, longPollID)
if nativeerrors.Is(err, clientpkg.ErrResponseTimedOut) {
log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
return
} else if err != nil {
@@ -203,24 +157,35 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT
}
}

func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*model.GetBlockTemplateResult, error) {
return client.GetBlockTemplate(miningAddr.String(), longPollID)
}

func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
func solveLoop(newTemplateChan chan *model.GetBlockTemplateResult, foundBlock chan *util.Block,
mineWhenNotSynced bool, errChan chan error) {

var stopOldTemplateSolving chan struct{}
for template := range newTemplateChan {
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)
}

if !template.IsSynced {
if !mineWhenNotSynced {
errChan <- errors.Errorf("got template with isSynced=false")
return
}
log.Warnf("Got template with isSynced=false")
}

stopOldTemplateSolving = make(chan struct{})
block, err := parseBlock(template)
block, err := clientpkg.ConvertGetBlockTemplateResultToBlock(template)
if err != nil {
errChan <- errors.Errorf("Error parsing block: %s", err)
return
}

spawn(func() {
spawn("solveBlock", func() {
solveBlock(block, stopOldTemplateSolving, foundBlock)
})
}
@@ -4,10 +4,10 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"os"
)
@@ -28,7 +28,11 @@ func main() {
printErrorAndExit(err, "Failed to decode transaction")
}

scriptPubKey, err := createScriptPubKey(privateKey.PubKey())
pubkey, err := privateKey.SchnorrPublicKey()
if err != nil {
printErrorAndExit(err, "Failed to generate a public key")
}
scriptPubKey, err := createScriptPubKey(pubkey)
if err != nil {
printErrorAndExit(err, "Failed to create scriptPubKey")
}
@@ -46,24 +50,30 @@ func main() {
fmt.Printf("Signed Transaction (hex): %s\n\n", serializedTransaction)
}

func parsePrivateKey(privateKeyHex string) (*ecc.PrivateKey, error) {
func parsePrivateKey(privateKeyHex string) (*secp256k1.PrivateKey, error) {
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
return privateKey, err
if err != nil {
return nil, errors.Errorf("'%s' isn't a valid hex. err: '%s' ", privateKeyHex, err)
}
return secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
}

func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
func parseTransaction(transactionHex string) (*domainmessage.MsgTx, error) {
serializedTx, err := hex.DecodeString(transactionHex)
if err != nil {
return nil, errors.Wrap(err, "couldn't decode transaction hex")
}
var transaction wire.MsgTx
var transaction domainmessage.MsgTx
err = transaction.Deserialize(bytes.NewReader(serializedTx))
return &transaction, err
}

func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(publicKey.SerializeCompressed(), ActiveConfig().NetParams().Prefix)
func createScriptPubKey(publicKey *secp256k1.SchnorrPublicKey) ([]byte, error) {
serializedKey, err := publicKey.SerializeCompressed()
if err != nil {
return nil, err
}
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(serializedKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, err
}
@@ -71,7 +81,7 @@ func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
return scriptPubKey, err
}

func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, scriptPubKey []byte) error {
func signTransaction(transaction *domainmessage.MsgTx, privateKey *secp256k1.PrivateKey, scriptPubKey []byte) error {
for i, transactionInput := range transaction.TxIn {
signatureScript, err := txscript.SignatureScript(transaction, i, scriptPubKey, txscript.SigHashAll, privateKey, true)
if err != nil {
@@ -82,7 +92,7 @@ func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, script
return nil
}

func serializeTransaction(transaction *wire.MsgTx) (string, error) {
func serializeTransaction(transaction *domainmessage.MsgTx) (string, error) {
buf := bytes.NewBuffer(make([]byte, 0, transaction.SerializeSize()))
err := transaction.Serialize(buf)
serializedTransaction := hex.EncodeToString(buf.Bytes())
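The diff above moves the transaction signer from the in-tree ecc package to the external go-secp256k1 library and to Schnorr keys. A minimal sketch of the key-handling flow it now relies on, assuming only the go-secp256k1 calls that appear in the diff; the hex key below is a placeholder, not a real key:

package main

import (
    "encoding/hex"
    "fmt"

    "github.com/kaspanet/go-secp256k1"
)

func main() {
    // Placeholder 32-byte private key, hex-encoded. Never use for real funds.
    privateKeyHex := "0101010101010101010101010101010101010101010101010101010101010101"

    privateKeyBytes, err := hex.DecodeString(privateKeyHex)
    if err != nil {
        panic(err)
    }

    privateKey, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
    if err != nil {
        panic(err)
    }

    // Derive the Schnorr public key and serialize it in compressed form,
    // mirroring the calls used by createScriptPubKey in the diff.
    publicKey, err := privateKey.SchnorrPublicKey()
    if err != nil {
        panic(err)
    }
    serializedKey, err := publicKey.SerializeCompressed()
    if err != nil {
        panic(err)
    }
    _ = serializedKey // in the real tool this feeds util.NewAddressPubKeyHashFromPublicKey
    fmt.Println("derived and serialized a Schnorr public key")
}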
298 config/config.go
@@ -18,11 +18,12 @@ import (
"strings"
"time"

"github.com/kaspanet/kaspad/dagconfig"

"github.com/pkg/errors"

"github.com/btcsuite/go-socks/socks"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/network"
@@ -46,7 +47,6 @@ const (
defaultMaxRPCClients = 10
defaultMaxRPCWebsockets = 25
defaultMaxRPCConcurrentReqs = 20
defaultDbType = "ffldb"
defaultBlockMaxMass = 10000000
blockMaxMassMin = 1000
blockMaxMassMax = 10000000
@@ -65,14 +65,11 @@ var (

defaultConfigFile = filepath.Join(DefaultHomeDir, defaultConfigFilename)
defaultDataDir = filepath.Join(DefaultHomeDir, defaultDataDirname)
knownDbTypes = database.SupportedDrivers()
defaultRPCKeyFile = filepath.Join(DefaultHomeDir, "rpc.key")
defaultRPCCertFile = filepath.Join(DefaultHomeDir, "rpc.cert")
defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname)
)

var activeConfig *Config

// RunServiceCommand is only set to a real function on Windows. It is used
// to parse and execute service commands specified via the -s flag.
var RunServiceCommand func(string) error
@@ -120,7 +117,6 @@ type Flags struct {
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`
MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in KAS/kB to be considered a non-zero fee."`
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
BlockMaxMass uint64 `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"`
UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
@@ -130,7 +126,6 @@ type Flags struct {
DropAcceptanceIndex bool `long:"dropacceptanceindex" description:"Deletes the hash-based acceptance index from the database on start up and then exits."`
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
Subnetwork string `long:"subnetwork" description:"If subnetwork ID is specified, than node will request and process only payloads from specified subnetwork. And if subnetwork ID is ommited, than payloads of all subnetworks are processed. Subnetworks with IDs 2 through 255 are reserved for future use and are currently not allowed."`
ResetDatabase bool `long:"reset-db" description:"Reset database before starting node. It's needed when switching between subnetworks."`
NetworkFlags
}
@@ -168,17 +163,6 @@ func cleanAndExpandPath(path string) string {
return filepath.Clean(os.ExpandEnv(path))
}

// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}

return false
}

// newConfigParser returns a new command line flags parser.
func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options) *flags.Parser {
parser := flags.NewParser(cfgFlags, options)
@@ -188,42 +172,8 @@ func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options)
return parser
}

//LoadAndSetActiveConfig loads the config that can be afterward be accesible through ActiveConfig()
func LoadAndSetActiveConfig() error {
tcfg, _, err := loadConfig()
if err != nil {
return err
}
activeConfig = tcfg
return nil
}

// ActiveConfig is a getter to the main config
func ActiveConfig() *Config {
return activeConfig
}

// SetActiveConfig sets the active config
// to the given config.
func SetActiveConfig(cfg *Config) {
activeConfig = cfg
}

// loadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
//
// The above results in kaspad functioning properly without any config settings
// while still allowing the user to override settings with config files and
// command line options. Command line options always take precedence.
func loadConfig() (*Config, []string, error) {
// Default config.
cfgFlags := Flags{
func defaultFlags() *Flags {
return &Flags{
ConfigFile: defaultConfigFile,
DebugLevel: defaultLogLevel,
TargetOutboundPeers: defaultTargetOutboundPeers,
@@ -235,7 +185,6 @@ func loadConfig() (*Config, []string, error) {
RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs,
DataDir: defaultDataDir,
LogDir: defaultLogDir,
DbType: defaultDbType,
RPCKey: defaultRPCKeyFile,
RPCCert: defaultRPCCertFile,
BlockMaxMass: defaultBlockMaxMass,
@@ -244,6 +193,29 @@ func loadConfig() (*Config, []string, error) {
MinRelayTxFee: defaultMinRelayTxFee,
AcceptanceIndex: defaultAcceptanceIndex,
}
}

// DefaultConfig returns the default kaspad configuration
func DefaultConfig() *Config {
config := &Config{Flags: defaultFlags()}
config.NetworkFlags.ActiveNetParams = &dagconfig.MainnetParams
return config
}

// LoadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
//
// The above results in kaspad functioning properly without any config settings
// while still allowing the user to override settings with config files and
// command line options. Command line options always take precedence.
func LoadConfig() (cfg *Config, remainingArgs []string, err error) {
cfgFlags := defaultFlags()

// Service options which are only added on Windows.
serviceOpts := serviceOptions{}
@@ -253,8 +225,8 @@ func loadConfig() (*Config, []string, error) {
// help message error can be ignored here since they will be caught by
// the final parse below.
preCfg := cfgFlags
preParser := newConfigParser(&preCfg, &serviceOpts, flags.HelpFlag)
_, err := preParser.Parse()
preParser := newConfigParser(preCfg, &serviceOpts, flags.HelpFlag)
_, err = preParser.Parse()
if err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
@@ -286,9 +258,9 @@ func loadConfig() (*Config, []string, error) {

// Load additional config from file.
var configFileError error
parser := newConfigParser(&cfgFlags, &serviceOpts, flags.Default)
activeConfig = &Config{
Flags: &cfgFlags,
parser := newConfigParser(cfgFlags, &serviceOpts, flags.Default)
cfg = &Config{
Flags: cfgFlags,
}
if !(preCfg.RegressionTest || preCfg.Simnet) || preCfg.ConfigFile !=
defaultConfigFile {
@@ -314,12 +286,12 @@ func loadConfig() (*Config, []string, error) {
}

// Don't add peers from the config file when in regression test mode.
if preCfg.RegressionTest && len(activeConfig.AddPeers) > 0 {
activeConfig.AddPeers = nil
if preCfg.RegressionTest && len(cfg.AddPeers) > 0 {
cfg.AddPeers = nil
}

// Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse()
remainingArgs, err = parser.Parse()
if err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
@@ -349,8 +321,8 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}

if !activeConfig.DisableRPC {
if activeConfig.RPCUser == "" {
if !cfg.DisableRPC {
if cfg.RPCUser == "" {
str := "%s: rpcuser cannot be empty"
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
@@ -358,7 +330,7 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}

if activeConfig.RPCPass == "" {
if cfg.RPCPass == "" {
str := "%s: rpcpass cannot be empty"
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
@@ -367,7 +339,7 @@ func loadConfig() (*Config, []string, error) {
}
}

err = activeConfig.ResolveNetwork(parser)
err = cfg.ResolveNetwork(parser)
if err != nil {
return nil, nil, err
}
@@ -376,21 +348,21 @@ func loadConfig() (*Config, []string, error) {
// according to the default of the active network. The set
// configuration value takes precedence over the default value for the
// selected network.
relayNonStd := activeConfig.NetParams().RelayNonStdTxs
relayNonStd := cfg.NetParams().RelayNonStdTxs
switch {
case activeConfig.RelayNonStd && activeConfig.RejectNonStd:
case cfg.RelayNonStd && cfg.RejectNonStd:
str := "%s: rejectnonstd and relaynonstd cannot be used " +
"together -- choose only one"
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
case activeConfig.RejectNonStd:
case cfg.RejectNonStd:
relayNonStd = false
case activeConfig.RelayNonStd:
case cfg.RelayNonStd:
relayNonStd = true
}
activeConfig.RelayNonStd = relayNonStd
cfg.RelayNonStd = relayNonStd

// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
@@ -398,45 +370,35 @@ func loadConfig() (*Config, []string, error) {
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
activeConfig.DataDir = cleanAndExpandPath(activeConfig.DataDir)
activeConfig.DataDir = filepath.Join(activeConfig.DataDir, activeConfig.NetParams().Name)
cfg.DataDir = cleanAndExpandPath(cfg.DataDir)
cfg.DataDir = filepath.Join(cfg.DataDir, cfg.NetParams().Name)

// Append the network type to the log directory so it is "namespaced"
// per network in the same fashion as the data directory.
activeConfig.LogDir = cleanAndExpandPath(activeConfig.LogDir)
activeConfig.LogDir = filepath.Join(activeConfig.LogDir, activeConfig.NetParams().Name)
cfg.LogDir = cleanAndExpandPath(cfg.LogDir)
cfg.LogDir = filepath.Join(cfg.LogDir, cfg.NetParams().Name)

// Special show command to list supported subsystems and exit.
if activeConfig.DebugLevel == "show" {
if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems", logger.SupportedSubsystems())
os.Exit(0)
}

// Initialize log rotation. After log rotation has been initialized, the
// logger variables may be used.
logger.InitLog(filepath.Join(activeConfig.LogDir, defaultLogFilename), filepath.Join(activeConfig.LogDir, defaultErrLogFilename))
logger.InitLog(filepath.Join(cfg.LogDir, defaultLogFilename), filepath.Join(cfg.LogDir, defaultErrLogFilename))

// Parse, validate, and set debug log level(s).
if err := logger.ParseAndSetDebugLevels(activeConfig.DebugLevel); err != nil {
if err := logger.ParseAndSetDebugLevels(cfg.DebugLevel); err != nil {
err := errors.Errorf("%s: %s", funcName, err.Error())
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, funcName, activeConfig.DbType, knownDbTypes)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Validate profile port number
if activeConfig.Profile != "" {
profilePort, err := strconv.Atoi(activeConfig.Profile)
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
str := "%s: The profile port must be between 1024 and 65535"
err := errors.Errorf(str, funcName)
@@ -447,20 +409,20 @@ func loadConfig() (*Config, []string, error) {
}

// Don't allow ban durations that are too short.
if activeConfig.BanDuration < time.Second {
if cfg.BanDuration < time.Second {
str := "%s: The banduration option may not be less than 1s -- parsed [%s]"
err := errors.Errorf(str, funcName, activeConfig.BanDuration)
err := errors.Errorf(str, funcName, cfg.BanDuration)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Validate any given whitelisted IP addresses and networks.
if len(activeConfig.Whitelists) > 0 {
if len(cfg.Whitelists) > 0 {
var ip net.IP
activeConfig.Whitelists = make([]*net.IPNet, 0, len(activeConfig.Flags.Whitelists))
cfg.Whitelists = make([]*net.IPNet, 0, len(cfg.Flags.Whitelists))

for _, addr := range activeConfig.Flags.Whitelists {
for _, addr := range cfg.Flags.Whitelists {
_, ipnet, err := net.ParseCIDR(addr)
if err != nil {
ip = net.ParseIP(addr)
@@ -483,12 +445,12 @@ func loadConfig() (*Config, []string, error) {
Mask: net.CIDRMask(bits, bits),
}
}
activeConfig.Whitelists = append(activeConfig.Whitelists, ipnet)
cfg.Whitelists = append(cfg.Whitelists, ipnet)
}
}

// --addPeer and --connect do not mix.
if len(activeConfig.AddPeers) > 0 && len(activeConfig.ConnectPeers) > 0 {
if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 {
str := "%s: the --addpeer and --connect options can not be " +
"mixed"
err := errors.Errorf(str, funcName)
@@ -498,27 +460,28 @@ func loadConfig() (*Config, []string, error) {
}

// --proxy or --connect without --listen disables listening.
if (activeConfig.Proxy != "" || len(activeConfig.ConnectPeers) > 0) &&
len(activeConfig.Listeners) == 0 {
activeConfig.DisableListen = true
if (cfg.Proxy != "" || len(cfg.ConnectPeers) > 0) &&
len(cfg.Listeners) == 0 {
cfg.DisableListen = true
}

// Connect means no DNS seeding.
if len(activeConfig.ConnectPeers) > 0 {
activeConfig.DisableDNSSeed = true
// ConnectPeers means no DNS seeding and no outbound peers
if len(cfg.ConnectPeers) > 0 {
cfg.DisableDNSSeed = true
cfg.TargetOutboundPeers = 0
}

// Add the default listener if none were specified. The default
// listener is all addresses on the listen port for the network
// we are to connect to.
if len(activeConfig.Listeners) == 0 {
activeConfig.Listeners = []string{
net.JoinHostPort("", activeConfig.NetParams().DefaultPort),
if len(cfg.Listeners) == 0 {
cfg.Listeners = []string{
net.JoinHostPort("", cfg.NetParams().DefaultPort),
}
}

// Check to make sure limited and admin users don't have the same username
if activeConfig.RPCUser == activeConfig.RPCLimitUser && activeConfig.RPCUser != "" {
if cfg.RPCUser == cfg.RPCLimitUser && cfg.RPCUser != "" {
str := "%s: --rpcuser and --rpclimituser must not specify the " +
"same username"
err := errors.Errorf(str, funcName)
@@ -528,7 +491,7 @@ func loadConfig() (*Config, []string, error) {
}

// Check to make sure limited and admin users don't have the same password
if activeConfig.RPCPass == activeConfig.RPCLimitPass && activeConfig.RPCPass != "" {
if cfg.RPCPass == cfg.RPCLimitPass && cfg.RPCPass != "" {
str := "%s: --rpcpass and --rpclimitpass must not specify the " +
"same password"
err := errors.Errorf(str, funcName)
@@ -538,39 +501,39 @@ func loadConfig() (*Config, []string, error) {
}

// The RPC server is disabled if no username or password is provided.
if (activeConfig.RPCUser == "" || activeConfig.RPCPass == "") &&
(activeConfig.RPCLimitUser == "" || activeConfig.RPCLimitPass == "") {
activeConfig.DisableRPC = true
if (cfg.RPCUser == "" || cfg.RPCPass == "") &&
(cfg.RPCLimitUser == "" || cfg.RPCLimitPass == "") {
cfg.DisableRPC = true
}

if activeConfig.DisableRPC {
if cfg.DisableRPC {
log.Infof("RPC service is disabled")
}

// Default RPC to listen on localhost only.
if !activeConfig.DisableRPC && len(activeConfig.RPCListeners) == 0 {
if !cfg.DisableRPC && len(cfg.RPCListeners) == 0 {
addrs, err := net.LookupHost("localhost")
if err != nil {
return nil, nil, err
}
activeConfig.RPCListeners = make([]string, 0, len(addrs))
cfg.RPCListeners = make([]string, 0, len(addrs))
for _, addr := range addrs {
addr = net.JoinHostPort(addr, activeConfig.NetParams().RPCPort)
activeConfig.RPCListeners = append(activeConfig.RPCListeners, addr)
addr = net.JoinHostPort(addr, cfg.NetParams().RPCPort)
cfg.RPCListeners = append(cfg.RPCListeners, addr)
}
}

if activeConfig.RPCMaxConcurrentReqs < 0 {
if cfg.RPCMaxConcurrentReqs < 0 {
str := "%s: The rpcmaxwebsocketconcurrentrequests option may " +
"not be less than 0 -- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.RPCMaxConcurrentReqs)
err := errors.Errorf(str, funcName, cfg.RPCMaxConcurrentReqs)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Validate the the minrelaytxfee.
activeConfig.MinRelayTxFee, err = util.NewAmount(activeConfig.Flags.MinRelayTxFee)
cfg.MinRelayTxFee, err = util.NewAmount(cfg.Flags.MinRelayTxFee)
if err != nil {
str := "%s: invalid minrelaytxfee: %s"
err := errors.Errorf(str, funcName, err)
@@ -580,39 +543,39 @@ func loadConfig() (*Config, []string, error) {
}

// Disallow 0 and negative min tx fees.
if activeConfig.MinRelayTxFee == 0 {
if cfg.MinRelayTxFee == 0 {
str := "%s: The minrelaytxfee option must be greater than 0 -- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.MinRelayTxFee)
err := errors.Errorf(str, funcName, cfg.MinRelayTxFee)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Limit the max block mass to a sane value.
if activeConfig.BlockMaxMass < blockMaxMassMin || activeConfig.BlockMaxMass >
if cfg.BlockMaxMass < blockMaxMassMin || cfg.BlockMaxMass >
blockMaxMassMax {

str := "%s: The blockmaxmass option must be in between %d " +
"and %d -- parsed [%d]"
err := errors.Errorf(str, funcName, blockMaxMassMin,
blockMaxMassMax, activeConfig.BlockMaxMass)
blockMaxMassMax, cfg.BlockMaxMass)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Limit the max orphan count to a sane value.
if activeConfig.MaxOrphanTxs < 0 {
if cfg.MaxOrphanTxs < 0 {
str := "%s: The maxorphantx option may not be less than 0 " +
"-- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.MaxOrphanTxs)
err := errors.Errorf(str, funcName, cfg.MaxOrphanTxs)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Look for illegal characters in the user agent comments.
for _, uaComment := range activeConfig.UserAgentComments {
for _, uaComment := range cfg.UserAgentComments {
if strings.ContainsAny(uaComment, "/:()") {
err := errors.Errorf("%s: The following characters must not "+
"appear in user agent comments: '/', ':', '(', ')'",
@@ -624,7 +587,7 @@ func loadConfig() (*Config, []string, error) {
}

// --acceptanceindex and --dropacceptanceindex do not mix.
if activeConfig.AcceptanceIndex && activeConfig.DropAcceptanceIndex {
if cfg.AcceptanceIndex && cfg.DropAcceptanceIndex {
err := errors.Errorf("%s: the --acceptanceindex and --dropacceptanceindex "+
"options may not be activated at the same time",
funcName)
@@ -633,61 +596,31 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}

// Check mining addresses are valid and saved parsed versions.
activeConfig.MiningAddrs = make([]util.Address, 0, len(activeConfig.Flags.MiningAddrs))
for _, strAddr := range activeConfig.Flags.MiningAddrs {
addr, err := util.DecodeAddress(strAddr, activeConfig.NetParams().Prefix)
if err != nil {
str := "%s: mining address '%s' failed to decode: %s"
err := errors.Errorf(str, funcName, strAddr, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
if !addr.IsForPrefix(activeConfig.NetParams().Prefix) {
str := "%s: mining address '%s' is on the wrong network"
err := errors.Errorf(str, funcName, strAddr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
activeConfig.MiningAddrs = append(activeConfig.MiningAddrs, addr)
}

if activeConfig.Flags.Subnetwork != "" {
activeConfig.SubnetworkID, err = subnetworkid.NewFromStr(activeConfig.Flags.Subnetwork)
if err != nil {
return nil, nil, err
}
} else {
activeConfig.SubnetworkID = nil
}

// Add default port to all listener addresses if needed and remove
// duplicate addresses.
activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners,
activeConfig.NetParams().DefaultPort)
cfg.Listeners, err = network.NormalizeAddresses(cfg.Listeners,
cfg.NetParams().DefaultPort)
if err != nil {
return nil, nil, err
}

// Add default port to all rpc listener addresses if needed and remove
// duplicate addresses.
activeConfig.RPCListeners, err = network.NormalizeAddresses(activeConfig.RPCListeners,
activeConfig.NetParams().RPCPort)
cfg.RPCListeners, err = network.NormalizeAddresses(cfg.RPCListeners,
cfg.NetParams().RPCPort)
if err != nil {
return nil, nil, err
}

// Only allow TLS to be disabled if the RPC is bound to localhost
// addresses.
if !activeConfig.DisableRPC && activeConfig.DisableTLS {
if !cfg.DisableRPC && cfg.DisableTLS {
allowedTLSListeners := map[string]struct{}{
"localhost": {},
"127.0.0.1": {},
"::1": {},
}
for _, addr := range activeConfig.RPCListeners {
for _, addr := range cfg.RPCListeners {
host, _, err := net.SplitHostPort(addr)
if err != nil {
str := "%s: RPC listen interface '%s' is " +
@@ -709,16 +642,25 @@ func loadConfig() (*Config, []string, error) {
}
}

// Disallow --addpeer and --connect used together
if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 {
str := "%s: --addpeer and --connect can not be used together"
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

// Add default port to all added peer addresses if needed and remove
// duplicate addresses.
activeConfig.AddPeers, err = network.NormalizeAddresses(activeConfig.AddPeers,
activeConfig.NetParams().DefaultPort)
cfg.AddPeers, err = network.NormalizeAddresses(cfg.AddPeers,
cfg.NetParams().DefaultPort)
if err != nil {
return nil, nil, err
}

activeConfig.ConnectPeers, err = network.NormalizeAddresses(activeConfig.ConnectPeers,
activeConfig.NetParams().DefaultPort)
cfg.ConnectPeers, err = network.NormalizeAddresses(cfg.ConnectPeers,
cfg.NetParams().DefaultPort)
if err != nil {
return nil, nil, err
}
@@ -728,24 +670,24 @@ func loadConfig() (*Config, []string, error) {
// net.DialTimeout function as well as the system DNS resolver. When a
// proxy is specified, the dial function is set to the proxy specific
// dial function.
activeConfig.Dial = net.DialTimeout
activeConfig.Lookup = net.LookupIP
if activeConfig.Proxy != "" {
_, _, err := net.SplitHostPort(activeConfig.Proxy)
cfg.Dial = net.DialTimeout
cfg.Lookup = net.LookupIP
if cfg.Proxy != "" {
_, _, err := net.SplitHostPort(cfg.Proxy)
if err != nil {
str := "%s: Proxy address '%s' is invalid: %s"
err := errors.Errorf(str, funcName, activeConfig.Proxy, err)
err := errors.Errorf(str, funcName, cfg.Proxy, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}

proxy := &socks.Proxy{
Addr: activeConfig.Proxy,
Username: activeConfig.ProxyUser,
Password: activeConfig.ProxyPass,
Addr: cfg.Proxy,
Username: cfg.ProxyUser,
Password: cfg.ProxyPass,
}
activeConfig.Dial = proxy.DialTimeout
cfg.Dial = proxy.DialTimeout
}

// Warn about missing config file only after all other configuration is
@@ -755,7 +697,7 @@ func loadConfig() (*Config, []string, error) {
log.Warnf("%s", configFileError)
}

return activeConfig, remainingArgs, nil
return cfg, remainingArgs, nil
}

// createDefaultConfig copies the file sample-kaspad.conf to the given destination path,
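The config diff above replaces the loadConfig/ActiveConfig singleton with an exported LoadConfig that hands the parsed Config back to the caller, plus a DefaultConfig helper. A minimal sketch of how a caller might use the new API, assuming only the signatures and fields shown in the diff:

package main

import (
    "fmt"
    "os"

    "github.com/kaspanet/kaspad/config"
)

func main() {
    // Parse the config file and command-line flags; LoadConfig replaces the
    // old loadConfig/SetActiveConfig pair and returns the Config directly.
    cfg, remainingArgs, err := config.LoadConfig()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("data directory:", cfg.DataDir)
    fmt.Println("unparsed arguments:", remainingArgs)

    // DefaultConfig is handy when no CLI parsing is wanted, for example in tests.
    defaults := config.DefaultConfig()
    fmt.Println("default network:", defaults.NetParams().Name)
}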
113 connmanager/connection_requests.go (new file)
@@ -0,0 +1,113 @@
package connmanager

import (
"time"
)

const (
minRetryDuration = 30 * time.Second
maxRetryDuration = 10 * time.Minute
)

func nextRetryDuration(previousDuration time.Duration) time.Duration {
if previousDuration < minRetryDuration {
return minRetryDuration
}
if previousDuration*2 > maxRetryDuration {
return maxRetryDuration
}
return previousDuration * 2
}
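nextRetryDuration above implements a simple doubling backoff clamped between 30 seconds and 10 minutes. A small, self-contained sketch that prints the resulting retry schedule for a permanent connection request; the logic is copied locally so the example runs on its own:

package main

import (
    "fmt"
    "time"
)

const (
    minRetryDuration = 30 * time.Second
    maxRetryDuration = 10 * time.Minute
)

// Same doubling-with-clamp rule as connmanager.nextRetryDuration.
func nextRetryDuration(previousDuration time.Duration) time.Duration {
    if previousDuration < minRetryDuration {
        return minRetryDuration
    }
    if previousDuration*2 > maxRetryDuration {
        return maxRetryDuration
    }
    return previousDuration * 2
}

func main() {
    d := time.Duration(0) // a fresh request starts with a zero retry duration
    for i := 0; i < 7; i++ {
        d = nextRetryDuration(d)
        fmt.Printf("attempt %d: retry in %s\n", i+1, d)
    }
    // Prints 30s, 1m0s, 2m0s, 4m0s, 8m0s, 10m0s, 10m0s.
}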
// checkRequestedConnections checks that all activeRequested are still active, and initiates connections
// for pendingRequested.
// While doing so, it filters out of connSet all connections that were initiated as a connectionRequest
func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
c.connectionRequestsLock.Lock()
defer c.connectionRequestsLock.Unlock()

now := time.Now()

for address, connReq := range c.activeRequested {
connection, ok := connSet.get(address)
if !ok { // a requested connection was disconnected
delete(c.activeRequested, address)

if connReq.isPermanent { // if is one-try - ignore. If permanent - add to pending list to retry
connReq.nextAttempt = now
connReq.retryDuration = 0
c.pendingRequested[address] = connReq
}
continue
}

connSet.remove(connection)
}

for address, connReq := range c.pendingRequested {
if connReq.nextAttempt.After(now) { // ignore connection requests which are still waiting for retry
continue
}

connection, ok := connSet.get(address)
// The pending connection request has already connected - move it to active
// This can happen in rare cases such as when the other side has connected to our node
// while it has been pending on our side.
if ok {
delete(c.pendingRequested, address)
c.pendingRequested[address] = connReq

connSet.remove(connection)

continue
}

// try to initiate connection
err := c.initiateConnection(connReq.address)
if err != nil {
log.Infof("Couldn't connect to %s: %s", address, err)
// if connection request is one try - remove from pending and ignore failure
if !connReq.isPermanent {
delete(c.pendingRequested, address)
continue
}
// if connection request is permanent - keep in pending, and increase retry time
connReq.retryDuration = nextRetryDuration(connReq.retryDuration)
connReq.nextAttempt = now.Add(connReq.retryDuration)
log.Debugf("Retrying permanent connection to %s in %s", address, connReq.retryDuration)
continue
}

// if connected successfully - move from pending to active
delete(c.pendingRequested, address)
c.activeRequested[address] = connReq
}
}

// AddConnectionRequest adds the given address to list of pending connection requests
func (c *ConnectionManager) AddConnectionRequest(address string, isPermanent bool) {
// spawn goroutine so that caller doesn't wait in case connectionManager is in the midst of handling
// connection requests
spawn("ConnectionManager.AddConnectionRequest", func() {
c.connectionRequestsLock.Lock()
defer c.connectionRequestsLock.Unlock()

if _, ok := c.activeRequested[address]; ok {
return
}

c.pendingRequested[address] = &connectionRequest{
address: address,
isPermanent: isPermanent,
}

c.run()
})
}

// RemoveConnection disconnects the connection for the given address
// and removes it entirely from the connection manager.
func (c *ConnectionManager) RemoveConnection(address string) {
// TODO(libp2p): unimplemented
panic("unimplemented")
}
30 connmanager/connection_set.go (new file)
@@ -0,0 +1,30 @@
package connmanager

import (
"github.com/kaspanet/kaspad/netadapter"
)

type connectionSet map[string]*netadapter.NetConnection

func (cs connectionSet) add(connection *netadapter.NetConnection) {
cs[connection.Address()] = connection
}

func (cs connectionSet) remove(connection *netadapter.NetConnection) {
delete(cs, connection.Address())
}

func (cs connectionSet) get(address string) (*netadapter.NetConnection, bool) {
connection, ok := cs[address]
return connection, ok
}

func convertToSet(connections []*netadapter.NetConnection) connectionSet {
connSet := make(connectionSet, len(connections))

for _, connection := range connections {
connSet[connection.Address()] = connection
}

return connSet
}
145
connmanager/connmanager.go
Normal file
145
connmanager/connmanager.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package connmanager

import (
    "sync"
    "sync/atomic"
    "time"

    "github.com/kaspanet/kaspad/addressmanager"

    "github.com/kaspanet/kaspad/netadapter"

    "github.com/kaspanet/kaspad/config"
)

// connectionRequest represents a user request (either through CLI or RPC) to connect to a certain node
type connectionRequest struct {
    address       string
    isPermanent   bool
    nextAttempt   time.Time
    retryDuration time.Duration
}

// ConnectionManager monitors that the current active connections satisfy the requirements of
// outgoing, requested and incoming connections
type ConnectionManager struct {
    cfg            *config.Config
    netAdapter     *netadapter.NetAdapter
    addressManager *addressmanager.AddressManager

    activeRequested  map[string]*connectionRequest
    pendingRequested map[string]*connectionRequest
    activeOutgoing   map[string]struct{}
    targetOutgoing   int
    activeIncoming   map[string]struct{}
    maxIncoming      int

    stop                   uint32
    connectionRequestsLock sync.Mutex

    resetLoopChan chan struct{}
    loopTicker    *time.Ticker
}

// New instantiates a new instance of a ConnectionManager
func New(cfg *config.Config, netAdapter *netadapter.NetAdapter, addressManager *addressmanager.AddressManager) (*ConnectionManager, error) {
    c := &ConnectionManager{
        cfg:              cfg,
        netAdapter:       netAdapter,
        addressManager:   addressManager,
        activeRequested:  map[string]*connectionRequest{},
        pendingRequested: map[string]*connectionRequest{},
        activeOutgoing:   map[string]struct{}{},
        activeIncoming:   map[string]struct{}{},
        resetLoopChan:    make(chan struct{}),
        loopTicker:       time.NewTicker(connectionsLoopInterval),
    }

    connectPeers := cfg.AddPeers
    if len(cfg.ConnectPeers) > 0 {
        connectPeers = cfg.ConnectPeers
    }

    c.maxIncoming = cfg.MaxInboundPeers
    c.targetOutgoing = cfg.TargetOutboundPeers

    for _, connectPeer := range connectPeers {
        c.pendingRequested[connectPeer] = &connectionRequest{
            address:     connectPeer,
            isPermanent: true,
        }
    }

    return c, nil
}

// Start begins the operation of the ConnectionManager
func (c *ConnectionManager) Start() {
    spawn("ConnectionManager.connectionsLoop", c.connectionsLoop)
}

// Stop halts the operation of the ConnectionManager
func (c *ConnectionManager) Stop() {
    atomic.StoreUint32(&c.stop, 1)

    for _, connection := range c.netAdapter.Connections() {
        connection.Disconnect()
    }

    c.loopTicker.Stop()
}

func (c *ConnectionManager) run() {
    c.resetLoopChan <- struct{}{}
}

func (c *ConnectionManager) initiateConnection(address string) error {
    log.Infof("Connecting to %s", address)
    return c.netAdapter.Connect(address)
}

const connectionsLoopInterval = 30 * time.Second

func (c *ConnectionManager) connectionsLoop() {
    for atomic.LoadUint32(&c.stop) == 0 {
        connections := c.netAdapter.Connections()

        // We convert the connections list to a set, so that connections can be found quickly
        // Then we go over the set, classifying connection by category: requested, outgoing or incoming.
        // Every step removes all matching connections so that once we get to checkIncomingConnections -
        // the only connections left are the incoming ones
        connSet := convertToSet(connections)

        c.checkRequestedConnections(connSet)

        c.checkOutgoingConnections(connSet)

        c.checkIncomingConnections(connSet)

        c.waitTillNextIteration()
    }
}

// ConnectionCount returns the count of the connected connections
func (c *ConnectionManager) ConnectionCount() int {
    return c.netAdapter.ConnectionCount()
}

// Ban marks the given netConnection as banned
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
    return c.addressManager.Ban(netConnection.NetAddress())
}

// IsBanned returns whether the given netConnection is banned
func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) {
    return c.addressManager.IsBanned(netConnection.NetAddress())
}

func (c *ConnectionManager) waitTillNextIteration() {
    select {
    case <-c.resetLoopChan:
        c.loopTicker.Stop()
        c.loopTicker = time.NewTicker(connectionsLoopInterval)
    case <-c.loopTicker.C:
    }
}
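For orientation, here is a minimal wiring sketch (not part of this diff) showing how the new package is meant to be driven. It assumes the config.Config, netadapter.NetAdapter and addressmanager.AddressManager values are already built by the application's startup code, and it only uses the New and Start functions defined above.

package example // hypothetical caller, for illustration only

import (
    "github.com/kaspanet/kaspad/addressmanager"
    "github.com/kaspanet/kaspad/config"
    "github.com/kaspanet/kaspad/connmanager"
    "github.com/kaspanet/kaspad/netadapter"
)

// startConnectionManager is a hypothetical helper; the three dependencies are
// assumed to be constructed elsewhere by the application.
func startConnectionManager(cfg *config.Config, netAdapter *netadapter.NetAdapter,
    addressManager *addressmanager.AddressManager) (*connmanager.ConnectionManager, error) {

    connManager, err := connmanager.New(cfg, netAdapter, addressManager)
    if err != nil {
        return nil, err
    }
    // Start spawns connectionsLoop, which re-checks requested, outgoing and
    // incoming connections every connectionsLoopInterval (30 seconds).
    connManager.Start()
    return connManager, nil
}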
20 connmanager/incoming_connections.go Normal file
@@ -0,0 +1,20 @@
package connmanager

// checkIncomingConnections makes sure there's no more than maxIncoming incoming connections
// if there are - it randomly disconnects enough to go below that number
func (c *ConnectionManager) checkIncomingConnections(incomingConnectionSet connectionSet) {
    if len(incomingConnectionSet) <= c.maxIncoming {
        return
    }

    numConnectionsOverMax := len(incomingConnectionSet) - c.maxIncoming
    // randomly disconnect nodes until the number of incoming connections is smaller than maxIncoming
    for _, connection := range incomingConnectionSet {
        connection.Disconnect()

        numConnectionsOverMax--
        if numConnectionsOverMax == 0 {
            break
        }
    }
}
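A note on the "randomly disconnects" comment above: the selection relies on Go's randomized map iteration order rather than an explicit random source. A self-contained sketch of that eviction pattern, with hypothetical names unrelated to kaspad's types:

package example

// disconnecter is a stand-in for anything with a Disconnect method.
type disconnecter interface{ Disconnect() }

// evictOverLimit disconnects len(conns)-max connections; iterating a Go map
// visits keys in a randomized order, which is what makes the choice "random".
func evictOverLimit(conns map[string]disconnecter, max int) {
    over := len(conns) - max
    if over <= 0 {
        return
    }
    for _, conn := range conns {
        conn.Disconnect()
        over--
        if over == 0 {
            break
        }
    }
}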
9 connmanager/log.go Normal file
@@ -0,0 +1,9 @@
package connmanager

import (
    "github.com/kaspanet/kaspad/logger"
    "github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.CMGR)
var spawn = panics.GoroutineWrapperFunc(log)
61 connmanager/outgoing_connections.go Normal file
@@ -0,0 +1,61 @@
package connmanager

// checkOutgoingConnections goes over all activeOutgoing and makes sure they are still active.
// Then it opens connections so that we have targetOutgoing active connections
func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
    for address := range c.activeOutgoing {
        connection, ok := connSet.get(address)
        if ok { // connection is still connected
            connSet.remove(connection)
            continue
        }

        // if connection is dead - remove from list of active ones
        delete(c.activeOutgoing, address)
    }

    liveConnections := len(c.activeOutgoing)
    if c.targetOutgoing == liveConnections {
        return
    }

    log.Debugf("Have got %d outgoing connections out of target %d, adding %d more",
        liveConnections, c.targetOutgoing, c.targetOutgoing-liveConnections)

    connectionsNeededCount := c.targetOutgoing - len(c.activeOutgoing)
    connectionAttempts := connectionsNeededCount * 2
    for i := 0; i < connectionAttempts; i++ {
        // Return in case we've already reached or surpassed our target
        if len(c.activeOutgoing) >= c.targetOutgoing {
            return
        }

        address := c.addressManager.GetAddress()
        if address == nil {
            log.Warnf("No more addresses available")
            return
        }

        netAddress := address.NetAddress()
        tcpAddress := netAddress.TCPAddress()
        addressString := tcpAddress.String()
        isBanned, err := c.addressManager.IsBanned(netAddress)
        if err != nil {
            log.Infof("Couldn't resolve whether %s is banned: %s", addressString, err)
            continue
        }
        if isBanned {
            continue
        }

        c.addressManager.Attempt(netAddress)
        err = c.initiateConnection(addressString)
        if err != nil {
            log.Infof("Couldn't connect to %s: %s", addressString, err)
            continue
        }

        c.addressManager.Connected(netAddress)
        c.activeOutgoing[addressString] = struct{}{}
    }
}
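The dial loop above budgets two connection attempts per missing outgoing slot, so a run of bad or banned addresses cannot spin forever within a single loop iteration. A standalone sketch of that retry-budget idea (hypothetical helper, not kaspad API):

package example

// fillSlots tries to raise live up to target, spending at most two dial
// attempts per missing slot before giving up until the next loop iteration.
func fillSlots(target, live int, dial func() bool) int {
    attempts := (target - live) * 2
    for i := 0; i < attempts && live < target; i++ {
        if dial() {
            live++
        }
    }
    return live
}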
@@ -1,27 +0,0 @@
connmgr
=======

[ISC License](https://choosealicense.com/licenses/isc/)
[GoDoc](http://godoc.org/github.com/kaspanet/kaspad/connmgr)

Package connmgr implements a generic Kaspa network connection manager.

## Overview

Connection Manager handles all the general connection concerns such as
maintaining a set number of outbound connections, sourcing peers, banning,
limiting max connections, etc.

The package provides a generic connection manager which is able to accept
connection requests from a source or a set of given addresses, dial them and
notify the caller on connections. The main intended use is to initialize a pool
of active connections and maintain them to remain connected to the P2P network.

In addition the connection manager provides the following utilities:

- Notifications on connections or disconnections
- Handle failures and retry new addresses from the source
- Connect only to specified addresses
- Permanent connections with increasing backoff retry timers
- Disconnect or Remove an established connection
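For context on what is being removed, this is roughly how callers wired the old connmgr package; a sketch that relies only on the Config fields and methods visible in the deleted file below, with net.Dial as a placeholder dialer and the connection callback left empty.

package example

import (
    "net"

    "github.com/kaspanet/kaspad/connmgr"
)

func runLegacyConnManager() error {
    cm, err := connmgr.New(&connmgr.Config{
        TargetOutbound: 8,
        // Dial is the only mandatory field; net.Dial stands in here.
        Dial: func(addr net.Addr) (net.Conn, error) {
            return net.Dial(addr.Network(), addr.String())
        },
        OnConnection: func(req *connmgr.ConnReq, conn net.Conn) {
            // hand the connection over to the peer layer
        },
    })
    if err != nil {
        return err
    }
    cm.Start()
    // ... node runs ...
    cm.Stop()
    cm.Wait()
    return nil
}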
@@ -1,659 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package connmgr

import (
    nativeerrors "errors"
    "fmt"
    "net"
    "sync"
    "sync/atomic"
    "time"

    "github.com/pkg/errors"
)

// maxFailedAttempts is the maximum number of successive failed connection
// attempts after which network failure is assumed and new connections will
// be delayed by the configured retry duration.
const maxFailedAttempts = 25

var (
    // maxRetryDuration is the max duration of time retrying of a persistent
    // connection is allowed to grow to. This is necessary since the retry
    // logic uses a backoff mechanism which increases the interval base times
    // the number of retries that have been done.
    maxRetryDuration = time.Minute * 5

    // defaultRetryDuration is the default duration of time for retrying
    // persistent connections.
    defaultRetryDuration = time.Second * 5

    // defaultTargetOutbound is the default number of outbound connections to
    // maintain.
    defaultTargetOutbound = uint32(8)
)

var (
    // ErrDialNil is used to indicate that Dial cannot be nil in the configuration.
    ErrDialNil = errors.New("Config: Dial cannot be nil")

    // ErrMaxOutboundPeers is an error that is thrown when the max amount of peers had
    // been reached.
    ErrMaxOutboundPeers = errors.New("max outbound peers reached")

    // ErrAlreadyConnected is an error that is thrown if the peer is already
    // connected.
    ErrAlreadyConnected = errors.New("peer already connected")

    // ErrAlreadyPermanent is an error that is thrown if the peer is already
    // connected as a permanent peer.
    ErrAlreadyPermanent = errors.New("peer exists as a permanent peer")

    // ErrPeerNotFound is an error that is thrown if the peer was not found.
    ErrPeerNotFound = errors.New("peer not found")
)

// ConnState represents the state of the requested connection.
type ConnState uint8

// ConnState can be either pending, established, disconnected or failed. When
// a new connection is requested, it is attempted and categorized as
// established or failed depending on the connection result. An established
// connection which was disconnected is categorized as disconnected.
const (
    ConnPending ConnState = iota
    ConnFailing
    ConnCanceled
    ConnEstablished
    ConnDisconnected
)

// ConnReq is the connection request to a network address. If permanent, the
// connection will be retried on disconnection.
type ConnReq struct {
    // The following variables must only be used atomically.
    id uint64

    Addr      net.Addr
    Permanent bool

    conn       net.Conn
    state      ConnState
    stateMtx   sync.RWMutex
    retryCount uint32
}
// updateState updates the state of the connection request.
func (c *ConnReq) updateState(state ConnState) {
    c.stateMtx.Lock()
    defer c.stateMtx.Unlock()
    c.state = state
}

// ID returns a unique identifier for the connection request.
func (c *ConnReq) ID() uint64 {
    return atomic.LoadUint64(&c.id)
}

// State is the connection state of the requested connection.
func (c *ConnReq) State() ConnState {
    c.stateMtx.RLock()
    defer c.stateMtx.RUnlock()
    state := c.state
    return state
}

// String returns a human-readable string for the connection request.
func (c *ConnReq) String() string {
    if c.Addr == nil || c.Addr.String() == "" {
        return fmt.Sprintf("reqid %d", atomic.LoadUint64(&c.id))
    }
    return fmt.Sprintf("%s (reqid %d)", c.Addr, atomic.LoadUint64(&c.id))
}
// Config holds the configuration options related to the connection manager.
type Config struct {
    // Listeners defines a slice of listeners for which the connection
    // manager will take ownership of and accept connections. When a
    // connection is accepted, the OnAccept handler will be invoked with the
    // connection. Since the connection manager takes ownership of these
    // listeners, they will be closed when the connection manager is
    // stopped.
    //
    // This field will not have any effect if the OnAccept field is not
    // also specified. It may be nil if the caller does not wish to listen
    // for incoming connections.
    Listeners []net.Listener

    // OnAccept is a callback that is fired when an inbound connection is
    // accepted. It is the caller's responsibility to close the connection.
    // Failure to close the connection will result in the connection manager
    // believing the connection is still active and thus have undesirable
    // side effects such as still counting toward maximum connection limits.
    //
    // This field will not have any effect if the Listeners field is not
    // also specified since there couldn't possibly be any accepted
    // connections in that case.
    OnAccept func(net.Conn)

    // TargetOutbound is the number of outbound network connections to
    // maintain. Defaults to 8.
    TargetOutbound uint32

    // RetryDuration is the duration to wait before retrying connection
    // requests. Defaults to 5s.
    RetryDuration time.Duration

    // OnConnection is a callback that is fired when a new outbound
    // connection is established.
    OnConnection func(*ConnReq, net.Conn)

    // OnDisconnection is a callback that is fired when an outbound
    // connection is disconnected.
    OnDisconnection func(*ConnReq)

    // GetNewAddress is a way to get an address to make a network connection
    // to. If nil, no new connections will be made automatically.
    GetNewAddress func() (net.Addr, error)

    // Dial connects to the address on the named network. It cannot be nil.
    Dial func(net.Addr) (net.Conn, error)
}
// registerPending is used to register a pending connection attempt. By
// registering pending connection attempts we allow callers to cancel pending
// connection attempts before their successful or in the case they're not
// longer wanted.
type registerPending struct {
    c    *ConnReq
    done chan struct{}
}

// handleConnected is used to queue a successful connection.
type handleConnected struct {
    c    *ConnReq
    conn net.Conn
}

// handleDisconnected is used to remove a connection.
type handleDisconnected struct {
    id    uint64
    retry bool
}

// handleFailed is used to remove a pending connection.
type handleFailed struct {
    c   *ConnReq
    err error
}

// ConnManager provides a manager to handle network connections.
type ConnManager struct {
    // The following variables must only be used atomically.
    connReqCount uint64
    start        int32
    stop         int32

    newConnReqMtx sync.Mutex

    cfg            Config
    wg             sync.WaitGroup
    failedAttempts uint64
    requests       chan interface{}
    quit           chan struct{}
}
// handleFailedConn handles a connection failed due to a disconnect or any
// other failure. If permanent, it retries the connection after the configured
// retry duration. Otherwise, if required, it makes a new connection request.
// After maxFailedConnectionAttempts new connections will be retried after the
// configured retry duration.
func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
    if atomic.LoadInt32(&cm.stop) != 0 {
        return
    }

    // Don't write throttled logs more than once every throttledConnFailedLogInterval
    shouldWriteLog := shouldWriteConnFailedLog(err)
    if shouldWriteLog {
        // If we are to write a log, set its lastLogTime to now
        setConnFailedLastLogTime(err, time.Now())
    }

    if c.Permanent {
        c.retryCount++
        d := time.Duration(c.retryCount) * cm.cfg.RetryDuration
        if d > maxRetryDuration {
            d = maxRetryDuration
        }
        if shouldWriteLog {
            log.Debugf("Retrying further connections to %s every %s", c, d)
        }
        spawnAfter(d, func() {
            cm.Connect(c)
        })
    } else if cm.cfg.GetNewAddress != nil {
        cm.failedAttempts++
        if cm.failedAttempts >= maxFailedAttempts {
            if shouldWriteLog {
                log.Debugf("Max failed connection attempts reached: [%d] "+
                    "-- retrying further connections every %s", maxFailedAttempts,
                    cm.cfg.RetryDuration)
            }
            spawnAfter(cm.cfg.RetryDuration, cm.NewConnReq)
        } else {
            spawn(cm.NewConnReq)
        }
    }
}
// throttledError defines an error type whose logs get throttled. This is to
// prevent flooding the logs with identical errors.
type throttledError error

var (
    // throttledConnFailedLogInterval is the minimum duration of time between
    // the logs defined in throttledConnFailedLogs.
    throttledConnFailedLogInterval = time.Minute * 10

    // throttledConnFailedLogs are logs that get written at most every
    // throttledConnFailedLogInterval. Each entry in this map defines a type
    // of error that we want to throttle. The value of each entry is the last
    // time that type of log had been written.
    throttledConnFailedLogs = map[throttledError]time.Time{
        ErrNoAddress: {},
    }

    // ErrNoAddress is an error that is thrown when there aren't any
    // valid connection addresses.
    ErrNoAddress throttledError = errors.New("no valid connect address")
)

// shouldWriteConnFailedLog resolves whether to write logs related to connection
// failures. Errors that had not been previously registered in throttledConnFailedLogs
// and non-error (nil values) must always be logged.
func shouldWriteConnFailedLog(err error) bool {
    if err == nil {
        return true
    }
    lastLogTime, ok := throttledConnFailedLogs[err]
    return !ok || lastLogTime.Add(throttledConnFailedLogInterval).Before(time.Now())
}

// setConnFailedLastLogTime sets the last log time of the specified error
func setConnFailedLastLogTime(err error, lastLogTime time.Time) {
    var throttledErr throttledError
    nativeerrors.As(err, &throttledErr)
    throttledConnFailedLogs[err] = lastLogTime
}
// connHandler handles all connection related requests. It must be run as a
// goroutine.
//
// The connection handler makes sure that we maintain a pool of active outbound
// connections so that we remain connected to the network. Connection requests
// are processed and mapped by their assigned ids.
func (cm *ConnManager) connHandler() {

    var (
        // pending holds all registered conn requests that have yet to
        // succeed.
        pending = make(map[uint64]*ConnReq)

        // conns represents the set of all actively connected peers.
        conns = make(map[uint64]*ConnReq, cm.cfg.TargetOutbound)
    )

out:
    for {
        select {
        case req := <-cm.requests:
            switch msg := req.(type) {

            case registerPending:
                connReq := msg.c
                connReq.updateState(ConnPending)
                pending[msg.c.id] = connReq
                close(msg.done)

            case handleConnected:
                connReq := msg.c

                if _, ok := pending[connReq.id]; !ok {
                    if msg.conn != nil {
                        msg.conn.Close()
                    }
                    log.Debugf("Ignoring connection for "+
                        "canceled connreq=%s", connReq)
                    continue
                }

                connReq.updateState(ConnEstablished)
                connReq.conn = msg.conn
                conns[connReq.id] = connReq
                log.Debugf("Connected to %s", connReq)
                connReq.retryCount = 0

                delete(pending, connReq.id)

                if cm.cfg.OnConnection != nil {
                    cm.cfg.OnConnection(connReq, msg.conn)
                }

            case handleDisconnected:
                connReq, ok := conns[msg.id]
                if !ok {
                    connReq, ok = pending[msg.id]
                    if !ok {
                        log.Errorf("Unknown connid=%d",
                            msg.id)
                        continue
                    }

                    // Pending connection was found, remove
                    // it from pending map if we should
                    // ignore a later, successful
                    // connection.
                    connReq.updateState(ConnCanceled)
                    log.Debugf("Canceling: %s", connReq)
                    delete(pending, msg.id)
                    continue
                }

                // An existing connection was located, mark as
                // disconnected and execute disconnection
                // callback.
                log.Debugf("Disconnected from %s", connReq)
                delete(conns, msg.id)

                if connReq.conn != nil {
                    connReq.conn.Close()
                }

                if cm.cfg.OnDisconnection != nil {
                    spawn(func() {
                        cm.cfg.OnDisconnection(connReq)
                    })
                }

                // All internal state has been cleaned up, if
                // this connection is being removed, we will
                // make no further attempts with this request.
                if !msg.retry {
                    connReq.updateState(ConnDisconnected)
                    continue
                }

                // Otherwise, we will attempt a reconnection if
                // we do not have enough peers, or if this is a
                // persistent peer. The connection request is
                // re added to the pending map, so that
                // subsequent processing of connections and
                // failures do not ignore the request.
                if uint32(len(conns)) < cm.cfg.TargetOutbound ||
                    connReq.Permanent {

                    connReq.updateState(ConnPending)
                    log.Debugf("Reconnecting to %s",
                        connReq)
                    pending[msg.id] = connReq
                    cm.handleFailedConn(connReq, nil)
                }

            case handleFailed:
                connReq := msg.c

                if _, ok := pending[connReq.id]; !ok {
                    log.Debugf("Ignoring connection for "+
                        "canceled conn req: %s", connReq)
                    continue
                }

                connReq.updateState(ConnFailing)
                if shouldWriteConnFailedLog(msg.err) {
                    log.Debugf("Failed to connect to %s: %s",
                        connReq, msg.err)
                }
                cm.handleFailedConn(connReq, msg.err)
            }

        case <-cm.quit:
            break out
        }
    }

    cm.wg.Done()
    log.Trace("Connection handler done")
}
// NotifyConnectionRequestComplete notifies the connection
// manager that a peer had been successfully connected and
// marked as good.
func (cm *ConnManager) NotifyConnectionRequestComplete() {
    cm.failedAttempts = 0
}

// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
    cm.newConnReqMtx.Lock()
    defer cm.newConnReqMtx.Unlock()
    if atomic.LoadInt32(&cm.stop) != 0 {
        return
    }
    if cm.cfg.GetNewAddress == nil {
        return
    }

    c := &ConnReq{}
    atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))

    // Submit a request of a pending connection attempt to the connection
    // manager. By registering the id before the connection is even
    // established, we'll be able to later cancel the connection via the
    // Remove method.
    done := make(chan struct{})
    select {
    case cm.requests <- registerPending{c, done}:
    case <-cm.quit:
        return
    }

    // Wait for the registration to successfully add the pending conn req to
    // the conn manager's internal state.
    select {
    case <-done:
    case <-cm.quit:
        return
    }

    addr, err := cm.cfg.GetNewAddress()
    if err != nil {
        select {
        case cm.requests <- handleFailed{c, err}:
        case <-cm.quit:
        }
        return
    }

    c.Addr = addr

    cm.Connect(c)
}
// Connect assigns an id and dials a connection to the address of the
// connection request.
func (cm *ConnManager) Connect(c *ConnReq) {
    if atomic.LoadInt32(&cm.stop) != 0 {
        return
    }
    if atomic.LoadUint64(&c.id) == 0 {
        atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))

        // Submit a request of a pending connection attempt to the
        // connection manager. By registering the id before the
        // connection is even established, we'll be able to later
        // cancel the connection via the Remove method.
        done := make(chan struct{})
        select {
        case cm.requests <- registerPending{c, done}:
        case <-cm.quit:
            return
        }

        // Wait for the registration to successfully add the pending
        // conn req to the conn manager's internal state.
        select {
        case <-done:
        case <-cm.quit:
            return
        }
    }

    log.Debugf("Attempting to connect to %s", c)

    conn, err := cm.cfg.Dial(c.Addr)
    if err != nil {
        select {
        case cm.requests <- handleFailed{c, err}:
        case <-cm.quit:
        }
        return
    }

    select {
    case cm.requests <- handleConnected{c, conn}:
    case <-cm.quit:
    }
}
// Disconnect disconnects the connection corresponding to the given connection
// id. If permanent, the connection will be retried with an increasing backoff
// duration.
func (cm *ConnManager) Disconnect(id uint64) {
    if atomic.LoadInt32(&cm.stop) != 0 {
        return
    }

    select {
    case cm.requests <- handleDisconnected{id, true}:
    case <-cm.quit:
    }
}

// Remove removes the connection corresponding to the given connection id from
// known connections.
//
// NOTE: This method can also be used to cancel a lingering connection attempt
// that hasn't yet succeeded.
func (cm *ConnManager) Remove(id uint64) {
    if atomic.LoadInt32(&cm.stop) != 0 {
        return
    }

    select {
    case cm.requests <- handleDisconnected{id, false}:
    case <-cm.quit:
    }
}
// listenHandler accepts incoming connections on a given listener. It must be
// run as a goroutine.
func (cm *ConnManager) listenHandler(listener net.Listener) {
    log.Infof("Server listening on %s", listener.Addr())
    for atomic.LoadInt32(&cm.stop) == 0 {
        conn, err := listener.Accept()
        if err != nil {
            // Only log the error if not forcibly shutting down.
            if atomic.LoadInt32(&cm.stop) == 0 {
                log.Errorf("Can't accept connection: %s", err)
            }
            continue
        }
        spawn(func() {
            cm.cfg.OnAccept(conn)
        })
    }

    cm.wg.Done()
    log.Tracef("Listener handler done for %s", listener.Addr())
}
// Start launches the connection manager and begins connecting to the network.
func (cm *ConnManager) Start() {
    // Already started?
    if atomic.AddInt32(&cm.start, 1) != 1 {
        return
    }

    log.Trace("Connection manager started")
    cm.wg.Add(1)
    spawn(cm.connHandler)

    // Start all the listeners so long as the caller requested them and
    // provided a callback to be invoked when connections are accepted.
    if cm.cfg.OnAccept != nil {
        for _, listener := range cm.cfg.Listeners {
            // Declaring this variable is necessary as it needs be declared in the same
            // scope of the anonymous function below it.
            listenerCopy := listener
            cm.wg.Add(1)
            spawn(func() {
                cm.listenHandler(listenerCopy)
            })
        }
    }

    for i := atomic.LoadUint64(&cm.connReqCount); i < uint64(cm.cfg.TargetOutbound); i++ {
        spawn(cm.NewConnReq)
    }
}
// Wait blocks until the connection manager halts gracefully.
func (cm *ConnManager) Wait() {
    cm.wg.Wait()
}

// Stop gracefully shuts down the connection manager.
func (cm *ConnManager) Stop() {
    if atomic.AddInt32(&cm.stop, 1) != 1 {
        log.Warnf("Connection manager already stopped")
        return
    }

    // Stop all the listeners. There will not be any listeners if
    // listening is disabled.
    for _, listener := range cm.cfg.Listeners {
        // Ignore the error since this is shutdown and there is no way
        // to recover anyways.
        _ = listener.Close()
    }

    close(cm.quit)
    log.Trace("Connection manager stopped")
}
// New returns a new connection manager.
// Use Start to start connecting to the network.
func New(cfg *Config) (*ConnManager, error) {
    if cfg.Dial == nil {
        return nil, ErrDialNil
    }
    // Default to sane values
    if cfg.RetryDuration <= 0 {
        cfg.RetryDuration = defaultRetryDuration
    }
    if cfg.TargetOutbound == 0 {
        cfg.TargetOutbound = defaultTargetOutbound
    }
    cm := ConnManager{
        cfg:      *cfg, // Copy so caller can't mutate
        requests: make(chan interface{}),
        quit:     make(chan struct{}),
    }
    return &cm, nil
}