Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-02-21 11:17:05 +00:00)

Compare commits: v0.6.2-dev ... v0.1.1-dev (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 271bcedc19 | |
| | be556ada9b | |
@@ -4,7 +4,7 @@ Kaspad
Warning: This is pre-alpha software. There's no guarantee anything works.
====

[](https://choosealicense.com/licenses/isc/)
[](http://copyfree.org)
[](http://godoc.org/github.com/kaspanet/kaspad)

Kaspad is the reference full node Kaspa implementation written in Go (golang).
@@ -75,5 +75,5 @@ The documentation is a work-in-progress. It is located in the [docs](https://git

## License

-Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).
+Kaspad is licensed under the [copyfree](http://copyfree.org) ISC License.
File diff suppressed because it is too large
@@ -1,635 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager

import (
	"fmt"
	"io/ioutil"
	"net"
	"reflect"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util/mstime"
	"github.com/kaspanet/kaspad/util/subnetworkid"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/domainmessage"
)

// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
	in   domainmessage.NetAddress
	want AddressKey
}

// naTests houses all of the tests to be performed against the NetAddressKey
// method.
var naTests = make([]naTest, 0)

// Put some IP in here for convenience. Points to google.
var someIP = "173.194.115.66"

// addNaTests
func addNaTests() {
	// IPv4
	// Localhost
	addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
	addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")

	// Class A
	addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
	addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
	addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
	addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")

	// Private Class A
	addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
	addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
	addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
	addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")

	// Class B
	addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
	addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
	addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
	addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")

	// Private Class B
	addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
	addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
	addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
	addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")

	// Class C
	addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
	addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
	addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
	addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")

	// Private Class C
	addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
	addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
	addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
	addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")

	// IPv6
	// Localhost
	addNaTest("::1", 16111, "[::1]:16111")
	addNaTest("fe80::1", 16110, "[fe80::1]:16110")

	// Link-local
	addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
	addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
	addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
	addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")

	// Site-local
	addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
	addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
	addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
	addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}

func addNaTest(ip string, port uint16, want AddressKey) {
	nip := net.ParseIP(ip)
	na := *domainmessage.NewNetAddressIPPort(nip, port, domainmessage.SFNodeNetwork)
	test := naTest{na, want}
	naTests = append(naTests, test)
}

func lookupFuncForTest(host string) ([]net.IP, error) {
	return nil, errors.New("not implemented")
}

func newAddrManagerForTest(t *testing.T, testName string,
	localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddressManager, teardown func()) {

	cfg := config.DefaultConfig()
	cfg.SubnetworkID = localSubnetworkID

	dbPath, err := ioutil.TempDir("", testName)
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
	}

	databaseContext, err := dbaccess.New(dbPath)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
	}

	addressManager = New(cfg, databaseContext)

	return addressManager, func() {
		err := databaseContext.Close()
		if err != nil {
			t.Fatalf("error closing the database: %s", err)
		}
	}
}

func TestStartStop(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestStartStop", nil)
	defer teardown()
	err := amgr.Start()
	if err != nil {
		t.Fatalf("Address Manager failed to start: %v", err)
	}
	err = amgr.Stop()
	if err != nil {
		t.Fatalf("Address Manager failed to stop: %v", err)
	}
}

func TestAddAddressByIP(t *testing.T) {
	fmtErr := errors.Errorf("")
	addrErr := &net.AddrError{}
	var tests = []struct {
		addrIP string
		err    error
	}{
		{
			someIP + ":16111",
			nil,
		},
		{
			someIP,
			addrErr,
		},
		{
			someIP[:12] + ":8333",
			fmtErr,
		},
		{
			someIP + ":abcd",
			fmtErr,
		},
	}

	amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
	defer teardown()
	for i, test := range tests {
		err := amgr.AddAddressByIP(test.addrIP, nil)
		if test.err != nil && err == nil {
			t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
			continue
		}
		if test.err == nil && err != nil {
			t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
			continue
		}
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
				reflect.TypeOf(err), reflect.TypeOf(test.err))
			continue
		}
	}
}

func TestAddLocalAddress(t *testing.T) {
	var tests = []struct {
		address  domainmessage.NetAddress
		priority AddressPriority
		valid    bool
	}{
		{
			domainmessage.NetAddress{IP: net.ParseIP("192.168.0.100")},
			InterfacePrio,
			false,
		},
		{
			domainmessage.NetAddress{IP: net.ParseIP("204.124.1.1")},
			InterfacePrio,
			true,
		},
		{
			domainmessage.NetAddress{IP: net.ParseIP("204.124.1.1")},
			BoundPrio,
			true,
		},
		{
			domainmessage.NetAddress{IP: net.ParseIP("::1")},
			InterfacePrio,
			false,
		},
		{
			domainmessage.NetAddress{IP: net.ParseIP("fe80::1")},
			InterfacePrio,
			false,
		},
		{
			domainmessage.NetAddress{IP: net.ParseIP("2620:100::1")},
			InterfacePrio,
			true,
		},
	}
	amgr, teardown := newAddrManagerForTest(t, "TestAddLocalAddress", nil)
	defer teardown()
	for x, test := range tests {
		result := amgr.AddLocalAddress(&test.address, test.priority)
		if result == nil && !test.valid {
			t.Errorf("TestAddLocalAddress test #%d failed: %s should have "+
				"been accepted", x, test.address.IP)
			continue
		}
		if result != nil && test.valid {
			t.Errorf("TestAddLocalAddress test #%d failed: %s should not have "+
				"been accepted", x, test.address.IP)
			continue
		}
	}
}

func TestAttempt(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil)
	defer teardown()

	// Add a new address and get it
	err := amgr.AddAddressByIP(someIP+":8333", nil)
	if err != nil {
		t.Fatalf("Adding address failed: %v", err)
	}
	ka := amgr.GetAddress()

	if !ka.LastAttempt().IsZero() {
		t.Errorf("Address should not have attempts, but does")
	}

	na := ka.NetAddress()
	amgr.Attempt(na)

	if ka.LastAttempt().IsZero() {
		t.Errorf("Address should have an attempt, but does not")
	}
}

func TestConnected(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil)
	defer teardown()

	// Add a new address and get it
	err := amgr.AddAddressByIP(someIP+":8333", nil)
	if err != nil {
		t.Fatalf("Adding address failed: %v", err)
	}
	ka := amgr.GetAddress()
	na := ka.NetAddress()
	// make it an hour ago
	na.Timestamp = mstime.Now().Add(time.Hour * -1)

	amgr.Connected(na)

	if !ka.NetAddress().Timestamp.After(na.Timestamp) {
		t.Errorf("Address should have a new timestamp, but does not")
	}
}

func TestNeedMoreAddresses(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil)
	defer teardown()
	addrsToAdd := 1500
	b := amgr.NeedMoreAddresses()
	if !b {
		t.Errorf("Expected that we need more addresses")
	}
	addrs := make([]*domainmessage.NetAddress, addrsToAdd)

	var err error
	for i := 0; i < addrsToAdd; i++ {
		s := AddressKey(fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60))
		addrs[i], err = amgr.DeserializeNetAddress(s)
		if err != nil {
			t.Errorf("Failed to turn %s into an address: %v", s, err)
		}
	}

	srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

	amgr.AddAddresses(addrs, srcAddr, nil)
	numAddrs := amgr.TotalNumAddresses()
	if numAddrs > addrsToAdd {
		t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
	}

	b = amgr.NeedMoreAddresses()
	if b {
		t.Errorf("Expected that we don't need more addresses")
	}
}

func TestGood(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestGood", nil)
	defer teardown()
	addrsToAdd := 64 * 64
	addrs := make([]*domainmessage.NetAddress, addrsToAdd)
	subnetworkCount := 32
	subnetworkIDs := make([]*subnetworkid.SubnetworkID, subnetworkCount)

	var err error
	for i := 0; i < addrsToAdd; i++ {
		s := AddressKey(fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60))
		addrs[i], err = amgr.DeserializeNetAddress(s)
		if err != nil {
			t.Errorf("Failed to turn %s into an address: %v", s, err)
		}
	}

	for i := 0; i < subnetworkCount; i++ {
		subnetworkIDs[i] = &subnetworkid.SubnetworkID{0xff - byte(i)}
	}

	srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

	amgr.AddAddresses(addrs, srcAddr, nil)
	for i, addr := range addrs {
		amgr.Good(addr, subnetworkIDs[i%subnetworkCount])
	}

	numAddrs := amgr.TotalNumAddresses()
	if numAddrs >= addrsToAdd {
		t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
	}

	numCache := len(amgr.AddressCache(true, nil))
	if numCache == 0 || numCache >= numAddrs/4 {
		t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
			numCache, numAddrs/4)
	}

	for i := 0; i < subnetworkCount; i++ {
		numCache = len(amgr.AddressCache(false, subnetworkIDs[i]))
		if numCache == 0 || numCache >= numAddrs/subnetworkCount {
			t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
				numCache, numAddrs/4/subnetworkCount)
		}
	}
}

func TestGoodChangeSubnetworkID(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil)
	defer teardown()
	addr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
	addrKey := NetAddressKey(addr)
	srcAddr := domainmessage.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

	oldSubnetwork := subnetworkid.SubnetworkIDNative
	amgr.AddAddress(addr, srcAddr, oldSubnetwork)
	amgr.Good(addr, oldSubnetwork)

	// make sure address was saved to addressIndex under oldSubnetwork
	ka := amgr.knownAddress(addr)
	if ka == nil {
		t.Fatalf("Address was not found after first time .Good called")
	}
	if !ka.SubnetworkID().IsEqual(oldSubnetwork) {
		t.Fatalf("Address index did not point to oldSubnetwork")
	}

	// make sure address was added to correct bucket under oldSubnetwork
	bucket := amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
	wasFound := false
	for _, ka := range bucket {
		if NetAddressKey(ka.NetAddress()) == addrKey {
			wasFound = true
		}
	}
	if !wasFound {
		t.Fatalf("Address was not found in the correct bucket in oldSubnetwork")
	}

	// now call .Good again with a different subnetwork
	newSubnetwork := subnetworkid.SubnetworkIDRegistry
	amgr.Good(addr, newSubnetwork)

	// make sure address was updated in addressIndex under newSubnetwork
	ka = amgr.knownAddress(addr)
	if ka == nil {
		t.Fatalf("Address was not found after second time .Good called")
	}
	if !ka.SubnetworkID().IsEqual(newSubnetwork) {
		t.Fatalf("Address index did not point to newSubnetwork")
	}

	// make sure address was removed from bucket under oldSubnetwork
	bucket = amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
	wasFound = false
	for _, ka := range bucket {
		if NetAddressKey(ka.NetAddress()) == addrKey {
			wasFound = true
		}
	}
	if wasFound {
		t.Fatalf("Address was not removed from bucket in oldSubnetwork")
	}

	// make sure address was added to correct bucket under newSubnetwork
	bucket = amgr.subnetworkTriedAddresBucketArrays[*newSubnetwork][amgr.triedAddressBucketIndex(addr)]
	wasFound = false
	for _, ka := range bucket {
		if NetAddressKey(ka.NetAddress()) == addrKey {
			wasFound = true
		}
	}
	if !wasFound {
		t.Fatalf("Address was not found in the correct bucket in newSubnetwork")
	}
}

func TestGetAddress(t *testing.T) {
	localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
	amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID)
	defer teardown()

	// Get an address from an empty set (should error)
	if rv := amgr.GetAddress(); rv != nil {
		t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
	}

	// Add a new address and get it
	err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
	if err != nil {
		t.Fatalf("Adding address failed: %v", err)
	}
	ka := amgr.GetAddress()
	if ka == nil {
		t.Fatalf("Did not get an address where there is one in the pool")
	}
	amgr.Attempt(ka.NetAddress())

	// Checks that we don't get it if we find that it has other subnetwork ID than expected.
	actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
	amgr.Good(ka.NetAddress(), actualSubnetworkID)
	ka = amgr.GetAddress()
	if ka != nil {
		t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
	}

	// Checks that the total number of addresses incremented although the new address is not full node or a partial node of the same subnetwork as the local node.
	numAddrs := amgr.TotalNumAddresses()
	if numAddrs != 1 {
		t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
	}

	// Now we repeat the same process, but now the address has the expected subnetwork ID.

	// Add a new address and get it
	err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
	if err != nil {
		t.Fatalf("Adding address failed: %v", err)
	}
	ka = amgr.GetAddress()
	if ka == nil {
		t.Fatalf("Did not get an address where there is one in the pool")
	}
	if ka.NetAddress().IP.String() != someIP {
		t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
	}
	if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
		t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
	}
	amgr.Attempt(ka.NetAddress())

	// Mark this as a good address and get it
	amgr.Good(ka.NetAddress(), localSubnetworkID)
	ka = amgr.GetAddress()
	if ka == nil {
		t.Fatalf("Did not get an address where there is one in the pool")
	}
	if ka.NetAddress().IP.String() != someIP {
		t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
	}
	if *ka.SubnetworkID() != *localSubnetworkID {
		t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
	}

	numAddrs = amgr.TotalNumAddresses()
	if numAddrs != 2 {
		t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
	}
}

func TestGetBestLocalAddress(t *testing.T) {
	localAddrs := []domainmessage.NetAddress{
		{IP: net.ParseIP("192.168.0.100")},
		{IP: net.ParseIP("::1")},
		{IP: net.ParseIP("fe80::1")},
		{IP: net.ParseIP("2001:470::1")},
	}

	var tests = []struct {
		remoteAddr domainmessage.NetAddress
		want0      domainmessage.NetAddress
		want1      domainmessage.NetAddress
		want2      domainmessage.NetAddress
		want3      domainmessage.NetAddress
	}{
		{
			// Remote connection from public IPv4
			domainmessage.NetAddress{IP: net.ParseIP("204.124.8.1")},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
			domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
		},
		{
			// Remote connection from private IPv4
			domainmessage.NetAddress{IP: net.ParseIP("172.16.0.254")},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.IPv4zero},
		},
		{
			// Remote connection from public IPv6
			domainmessage.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
			domainmessage.NetAddress{IP: net.IPv6zero},
			domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
			domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
			domainmessage.NetAddress{IP: net.ParseIP("2001:470::1")},
		},
		/* XXX
		{
			// Remote connection from Tor
			domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
			domainmessage.NetAddress{IP: net.IPv4zero},
			domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")},
			domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
		},
		*/
	}

	amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
	defer teardown()

	// Test against default when there's no address
	for x, test := range tests {
		got := amgr.GetBestLocalAddress(&test.remoteAddr)
		if !test.want0.IP.Equal(got.IP) {
			t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
				x, test.remoteAddr.IP, test.want1.IP, got.IP)
			continue
		}
	}

	for _, localAddr := range localAddrs {
		amgr.AddLocalAddress(&localAddr, InterfacePrio)
	}

	// Test against want1
	for x, test := range tests {
		got := amgr.GetBestLocalAddress(&test.remoteAddr)
		if !test.want1.IP.Equal(got.IP) {
			t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
				x, test.remoteAddr.IP, test.want1.IP, got.IP)
			continue
		}
	}

	// Add a public IP to the list of local addresses.
	localAddr := domainmessage.NetAddress{IP: net.ParseIP("204.124.8.100")}
	amgr.AddLocalAddress(&localAddr, InterfacePrio)

	// Test against want2
	for x, test := range tests {
		got := amgr.GetBestLocalAddress(&test.remoteAddr)
		if !test.want2.IP.Equal(got.IP) {
			t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s",
				x, test.remoteAddr.IP, test.want2.IP, got.IP)
			continue
		}
	}
	/*
		// Add a Tor generated IP address
		localAddr = domainmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
		amgr.AddLocalAddress(&localAddr, ManualPrio)
		// Test against want3
		for x, test := range tests {
			got := amgr.GetBestLocalAddress(&test.remoteAddr)
			if !test.want3.IP.Equal(got.IP) {
				t.Errorf("TestGetBestLocalAddress test3 #%d failed for remote address %s: want %s got %s",
					x, test.remoteAddr.IP, test.want3.IP, got.IP)
				continue
			}
		}
	*/
}

func TestNetAddressKey(t *testing.T) {
	addNaTests()

	t.Logf("Running %d tests", len(naTests))
	for i, test := range naTests {
		key := NetAddressKey(&test.in)
		if key != test.want {
			t.Errorf("NetAddressKey #%d\n got: %s want: %s", i, key, test.want)
			continue
		}
	}

}
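
The deleted tests above walk the whole v0.6.2-dev AddressManager API: construct, Start, feed addresses in, pull one out, record the attempt, and mark successes. As a rough orientation, a minimal sketch of that lifecycle, assuming the constructor and dbaccess context type implied by newAddrManagerForTest; the helper name runLifecycle is hypothetical and error handling is collapsed into a single return:

	package example

	import (
		"github.com/kaspanet/kaspad/addressmanager"
		"github.com/kaspanet/kaspad/config"
		"github.com/kaspanet/kaspad/dbaccess"
		"github.com/kaspanet/kaspad/util/subnetworkid"
	)

	// runLifecycle strings together the calls exercised by the tests above.
	func runLifecycle(cfg *config.Config, databaseContext *dbaccess.DatabaseContext,
		subnetworkID *subnetworkid.SubnetworkID) error {

		amgr := addressmanager.New(cfg, databaseContext)
		if err := amgr.Start(); err != nil {
			return err
		}

		// Feed in a candidate address; nil is used as the subnetwork ID
		// throughout the tests above.
		if err := amgr.AddAddressByIP("173.194.115.66:16111", nil); err != nil {
			return err
		}

		// Pick a candidate to dial, record the attempt, and mark it good
		// once a connection succeeds and the peer's subnetwork is known.
		if ka := amgr.GetAddress(); ka != nil {
			na := ka.NetAddress()
			amgr.Attempt(na)
			amgr.Good(na, subnetworkID)
		}

		return amgr.Stop()
	}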
@@ -1,24 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager

import (
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/util/mstime"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
	return ka.isBad()
}

func TstKnownAddressChance(ka *KnownAddress) float64 {
	return ka.chance()
}

func TstNewKnownAddress(na *domainmessage.NetAddress, attempts int,
	lastattempt, lastsuccess mstime.Time, tried bool, refs int) *KnownAddress {
	return &KnownAddress{netAddress: na, attempts: attempts, lastAttempt: lastattempt,
		lastSuccess: lastsuccess, tried: tried, referenceCount: refs}
}
@@ -1,115 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager_test

import (
	"github.com/kaspanet/kaspad/util/mstime"
	"math"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/addressmanager"
	"github.com/kaspanet/kaspad/domainmessage"
)

func TestChance(t *testing.T) {
	now := mstime.Now()
	var tests = []struct {
		addr     *addressmanager.KnownAddress
		expected float64
	}{
		{
			//Test normal case
			addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
			1.0,
		}, {
			//Test case in which lastseen < 0
			addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(20 * time.Second)},
				0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
			1.0,
		}, {
			//Test case in which lastAttempt < 0
			addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, mstime.Now().Add(30*time.Minute), mstime.Now(), false, 0),
			1.0 * .01,
		}, {
			//Test case in which lastAttempt < ten minutes
			addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, mstime.Now().Add(-5*time.Minute), mstime.Now(), false, 0),
			1.0 * .01,
		}, {
			//Test case with several failed attempts.
			addressmanager.TstNewKnownAddress(&domainmessage.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				2, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
			1 / 1.5 / 1.5,
		},
	}

	err := .0001
	for i, test := range tests {
		chance := addressmanager.TstKnownAddressChance(test.addr)
		if math.Abs(test.expected-chance) >= err {
			t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
		}
	}
}

func TestIsBad(t *testing.T) {
	now := mstime.Now()
	future := now.Add(35 * time.Minute)
	monthOld := now.Add(-43 * time.Hour * 24)
	secondsOld := now.Add(-2 * time.Second)
	minutesOld := now.Add(-27 * time.Minute)
	hoursOld := now.Add(-5 * time.Hour)
	zeroTime := mstime.Time{}

	futureNa := &domainmessage.NetAddress{Timestamp: future}
	minutesOldNa := &domainmessage.NetAddress{Timestamp: minutesOld}
	monthOldNa := &domainmessage.NetAddress{Timestamp: monthOld}
	currentNa := &domainmessage.NetAddress{Timestamp: secondsOld}

	//Test addresses that have been tried in the last minute.
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
	}
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
	}
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
	}
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
		t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
	}
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
		t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
	}

	//Test address that claims to be from the future.
	if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 6: addresses that claim to be from the future are bad.")
	}

	//Test address that has not been seen in over a month.
	if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 7: addresses more than a month old are bad.")
	}

	//It has failed at least three times and never succeeded.
	if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
		t.Errorf("test case 8: addresses that have never succeeded are bad.")
	}

	//It has failed ten times in the last week
	if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
		t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
	}

	//Test an address that should work.
	if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 10: This should be a valid address.")
	}
}
@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager

import (
	"net"
	"testing"

	"github.com/kaspanet/kaspad/domainmessage"
)

// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
	defer teardown()
	type ipTest struct {
		in       domainmessage.NetAddress
		rfc1918  bool
		rfc2544  bool
		rfc3849  bool
		rfc3927  bool
		rfc3964  bool
		rfc4193  bool
		rfc4380  bool
		rfc4843  bool
		rfc4862  bool
		rfc5737  bool
		rfc6052  bool
		rfc6145  bool
		rfc6598  bool
		local    bool
		valid    bool
		routable bool
	}

	newIPTest := func(ip string, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964,
		rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
		local, valid, routable bool) ipTest {
		nip := net.ParseIP(ip)
		na := *domainmessage.NewNetAddressIPPort(nip, 16111, domainmessage.SFNodeNetwork)
		test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
			rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
		return test
	}

	tests := []ipTest{
		newIPTest("10.255.255.255", true, false, false, false, false, false,
			false, false, false, false, false, false, false, false, true, false),
		newIPTest("192.168.0.1", true, false, false, false, false, false,
			false, false, false, false, false, false, false, false, true, false),
		newIPTest("172.31.255.1", true, false, false, false, false, false,
			false, false, false, false, false, false, false, false, true, false),
		newIPTest("172.32.1.1", false, false, false, false, false, false, false, false,
			false, false, false, false, false, false, true, true),
		newIPTest("169.254.250.120", false, false, false, true, false, false,
			false, false, false, false, false, false, false, false, true, false),
		newIPTest("0.0.0.0", false, false, false, false, false, false, false,
			false, false, false, false, false, false, true, false, false),
		newIPTest("255.255.255.255", false, false, false, false, false, false,
			false, false, false, false, false, false, false, false, false, false),
		newIPTest("127.0.0.1", false, false, false, false, false, false,
			false, false, false, false, false, false, false, true, true, false),
		newIPTest("fd00:dead::1", false, false, false, false, false, true,
			false, false, false, false, false, false, false, false, true, false),
		newIPTest("2001::1", false, false, false, false, false, false,
			true, false, false, false, false, false, false, false, true, true),
		newIPTest("2001:10:abcd::1:1", false, false, false, false, false, false,
			false, true, false, false, false, false, false, false, true, false),
		newIPTest("fe80::1", false, false, false, false, false, false,
			false, false, true, false, false, false, false, false, true, false),
		newIPTest("fe80:1::1", false, false, false, false, false, false,
			false, false, false, false, false, false, false, false, true, true),
		newIPTest("64:ff9b::1", false, false, false, false, false, false,
			false, false, false, false, true, false, false, false, true, true),
		newIPTest("::ffff:abcd:ef12:1", false, false, false, false, false, false,
			false, false, false, false, false, false, false, false, true, true),
		newIPTest("::1", false, false, false, false, false, false, false, false,
			false, false, false, false, false, true, true, false),
		newIPTest("198.18.0.1", false, true, false, false, false, false, false,
			false, false, false, false, false, false, false, true, false),
		newIPTest("100.127.255.1", false, false, false, false, false, false, false,
			false, false, false, false, false, true, false, true, false),
		newIPTest("203.0.113.1", false, false, false, false, false, false, false,
			false, false, false, false, false, false, false, true, false),
	}

	t.Logf("Running %d tests", len(tests))
	for _, test := range tests {
		if rv := IsRFC1918(&test.in); rv != test.rfc1918 {
			t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
		}

		if rv := IsRFC3849(&test.in); rv != test.rfc3849 {
			t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
		}

		if rv := IsRFC3927(&test.in); rv != test.rfc3927 {
			t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
		}

		if rv := IsRFC3964(&test.in); rv != test.rfc3964 {
			t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
		}

		if rv := IsRFC4193(&test.in); rv != test.rfc4193 {
			t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
		}

		if rv := IsRFC4380(&test.in); rv != test.rfc4380 {
			t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
		}

		if rv := IsRFC4843(&test.in); rv != test.rfc4843 {
			t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
		}

		if rv := IsRFC4862(&test.in); rv != test.rfc4862 {
			t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
		}

		if rv := IsRFC6052(&test.in); rv != test.rfc6052 {
			t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
		}

		if rv := IsRFC6145(&test.in); rv != test.rfc6145 {
			t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
		}

		if rv := IsLocal(&test.in); rv != test.local {
			t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
		}

		if rv := IsValid(&test.in); rv != test.valid {
			t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
		}

		if rv := amgr.IsRoutable(&test.in); rv != test.routable {
			t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
		}
	}
}

// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
	amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
	defer teardown()

	tests := []struct {
		name     string
		ip       string
		expected string
	}{
		// Local addresses.
		{name: "ipv4 localhost", ip: "127.0.0.1", expected: "local"},
		{name: "ipv6 localhost", ip: "::1", expected: "local"},
		{name: "ipv4 zero", ip: "0.0.0.0", expected: "local"},
		{name: "ipv4 first octet zero", ip: "0.1.2.3", expected: "local"},

		// Unroutable addresses.
		{name: "ipv4 invalid bcast", ip: "255.255.255.255", expected: "unroutable"},
		{name: "ipv4 rfc1918 10/8", ip: "10.1.2.3", expected: "unroutable"},
		{name: "ipv4 rfc1918 172.16/12", ip: "172.16.1.2", expected: "unroutable"},
		{name: "ipv4 rfc1918 192.168/16", ip: "192.168.1.2", expected: "unroutable"},
		{name: "ipv6 rfc3849 2001:db8::/32", ip: "2001:db8::1234", expected: "unroutable"},
		{name: "ipv4 rfc3927 169.254/16", ip: "169.254.1.2", expected: "unroutable"},
		{name: "ipv6 rfc4193 fc00::/7", ip: "fc00::1234", expected: "unroutable"},
		{name: "ipv6 rfc4843 2001:10::/28", ip: "2001:10::1234", expected: "unroutable"},
		{name: "ipv6 rfc4862 fe80::/64", ip: "fe80::1234", expected: "unroutable"},

		// IPv4 normal.
		{name: "ipv4 normal class a", ip: "12.1.2.3", expected: "12.1.0.0"},
		{name: "ipv4 normal class b", ip: "173.1.2.3", expected: "173.1.0.0"},
		{name: "ipv4 normal class c", ip: "196.1.2.3", expected: "196.1.0.0"},

		// IPv6/IPv4 translations.
		{name: "ipv6 rfc3964 with ipv4 encap", ip: "2002:0c01:0203::", expected: "12.1.0.0"},
		{name: "ipv6 rfc4380 toredo ipv4", ip: "2001:0:1234::f3fe:fdfc", expected: "12.1.0.0"},
		{name: "ipv6 rfc6052 well-known prefix with ipv4", ip: "64:ff9b::0c01:0203", expected: "12.1.0.0"},
		{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},

		// Tor.
		{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
		{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
		{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},

		// IPv6 normal.
		{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},
		{name: "ipv6 normal 2", ip: "2602:0100::1234", expected: "2602:100::"},
		{name: "ipv6 hurricane electric", ip: "2001:470:1f10:a1::2", expected: "2001:470:1000::"},
		{name: "ipv6 hurricane electric 2", ip: "2001:0470:1f10:a1::2", expected: "2001:470:1000::"},
	}

	for i, test := range tests {
		nip := net.ParseIP(test.ip)
		na := *domainmessage.NewNetAddressIPPort(nip, 8333, domainmessage.SFNodeNetwork)
		if key := amgr.GroupKey(&na); key != test.expected {
			t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
				"- got '%s', want '%s'", i, test.name,
				key, test.expected)
		}
	}
}
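
One step the TestGroupKey table leaves implicit is why the IPv6-to-IPv4 translation rows all land on "12.1.0.0": the IPv4 address is recovered from inside the IPv6 bytes, then masked to /16 like any other routable IPv4 address. Below is a standalone illustration for the RFC3964 (6to4) row; the byte offsets follow the 6to4 address layout, not code from the suppressed addrmanager.go:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		// RFC3964 (2002::/16) 6to4 addresses carry the IPv4 address in
		// bytes 2-5: 0x0c.0x01.0x02.0x03 = 12.1.2.3, which then groups
		// as 12.1.0.0 under the usual /16 mask.
		ip := net.ParseIP("2002:0c01:0203::")
		embedded := net.IPv4(ip[2], ip[3], ip[4], ip[5])
		fmt.Println(embedded.Mask(net.CIDRMask(16, 32))) // 12.1.0.0
	}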
addrmgr/addrmanager.go (new file, 1351 lines): file diff suppressed because it is too large

addrmgr/addrmanager_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
	"net"
	"testing"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/wire"
)

// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
	in   wire.NetAddress
	want string
}

// naTests houses all of the tests to be performed against the NetAddressKey
// method.
var naTests = make([]naTest, 0)

// Put some IP in here for convenience. Points to google.
var someIP = "173.194.115.66"

// addNaTests
func addNaTests() {
	// IPv4
	// Localhost
	addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
	addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")

	// Class A
	addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
	addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
	addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
	addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")

	// Private Class A
	addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
	addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
	addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
	addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")

	// Class B
	addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
	addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
	addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
	addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")

	// Private Class B
	addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
	addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
	addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
	addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")

	// Class C
	addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
	addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
	addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
	addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")

	// Private Class C
	addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
	addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
	addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
	addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")

	// IPv6
	// Localhost
	addNaTest("::1", 16111, "[::1]:16111")
	addNaTest("fe80::1", 16110, "[fe80::1]:16110")

	// Link-local
	addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
	addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
	addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
	addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")

	// Site-local
	addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
	addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
	addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
	addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}

func addNaTest(ip string, port uint16, want string) {
	nip := net.ParseIP(ip)
	na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork)
	test := naTest{na, want}
	naTests = append(naTests, test)
}

func lookupFunc(host string) ([]net.IP, error) {
	return nil, errors.New("not implemented")
}

func TestStartStop(t *testing.T) {
	n := New("teststartstop", lookupFunc, nil)
	n.Start()
	err := n.Stop()
	if err != nil {
		t.Fatalf("Address Manager failed to stop: %v", err)
	}
}

func TestNetAddressKey(t *testing.T) {
	addNaTests()

	t.Logf("Running %d tests", len(naTests))
	for i, test := range naTests {
		key := NetAddressKey(&test.in)
		if key != test.want {
			t.Errorf("NetAddressKey #%d\n got: %s want: %s", i, key, test.want)
			continue
		}
	}

}
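
Both generations of this test file pin NetAddressKey to plain host:port strings, with IPv6 hosts bracketed. That is exactly the format net.JoinHostPort produces, so the expected values can be sanity-checked with a few lines of standard-library Go. The netAddressKey helper below is an illustration of that relationship, not the implementation from the suppressed addrmanager.go:

	package main

	import (
		"fmt"
		"net"
		"strconv"
	)

	// netAddressKey sketches the key format the tests expect: "ip:port"
	// for IPv4 and "[ip]:port" for IPv6, which JoinHostPort emits directly.
	func netAddressKey(ip net.IP, port uint16) string {
		return net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(port), 10))
	}

	func main() {
		fmt.Println(netAddressKey(net.ParseIP("127.0.0.1"), 16111)) // 127.0.0.1:16111
		fmt.Println(netAddressKey(net.ParseIP("::1"), 16111))       // [::1]:16111
	}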
@@ -1,5 +1,5 @@
/*
-Package addressmanager implements concurrency safe Kaspa address manager.
+Package addrmgr implements concurrency safe Kaspa address manager.

Address Manager Overview

@@ -31,4 +31,4 @@ peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only
providing usable addresses.
*/
-package addressmanager
+package addrmgr
addrmgr/internal_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
	"time"

	"github.com/kaspanet/kaspad/wire"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
	return ka.isBad()
}

func TstKnownAddressChance(ka *KnownAddress) float64 {
	return ka.chance()
}

func TstNewKnownAddress(na *wire.NetAddress, attempts int,
	lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
	return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
		lastsuccess: lastsuccess, tried: tried, refs: refs}
}
@@ -2,36 +2,33 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

-package addressmanager
+package addrmgr

import (
-	"github.com/kaspanet/kaspad/util/mstime"
	"time"

	"github.com/kaspanet/kaspad/util/subnetworkid"

-	"github.com/kaspanet/kaspad/domainmessage"
+	"github.com/kaspanet/kaspad/wire"
)

// KnownAddress tracks information about a known network address that is used
// to determine how viable an address is.
type KnownAddress struct {
-	netAddress     *domainmessage.NetAddress
-	sourceAddress  *domainmessage.NetAddress
-	attempts       int
-	lastAttempt    mstime.Time
-	lastSuccess    mstime.Time
-	tried          bool
-	referenceCount int // reference count of new buckets
-	subnetworkID   *subnetworkid.SubnetworkID
-	isBanned       bool
-	bannedTime     mstime.Time
+	na           *wire.NetAddress
+	srcAddr      *wire.NetAddress
+	attempts     int
+	lastattempt  time.Time
+	lastsuccess  time.Time
+	tried        bool
+	refs         int // reference count of new buckets
+	subnetworkID *subnetworkid.SubnetworkID
}

-// NetAddress returns the underlying domainmessage.NetAddress associated with the
+// NetAddress returns the underlying wire.NetAddress associated with the
// known address.
-func (ka *KnownAddress) NetAddress() *domainmessage.NetAddress {
-	return ka.netAddress
+func (ka *KnownAddress) NetAddress() *wire.NetAddress {
+	return ka.na
}

// SubnetworkID returns the subnetwork ID of the known address.
@@ -40,16 +37,16 @@ func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID {
}

// LastAttempt returns the last time the known address was attempted.
-func (ka *KnownAddress) LastAttempt() mstime.Time {
-	return ka.lastAttempt
+func (ka *KnownAddress) LastAttempt() time.Time {
+	return ka.lastattempt
}

// chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 {
-	now := mstime.Now()
-	lastAttempt := now.Sub(ka.lastAttempt)
+	now := time.Now()
+	lastAttempt := now.Sub(ka.lastattempt)

	if lastAttempt < 0 {
		lastAttempt = 0
@@ -79,27 +76,27 @@ func (ka *KnownAddress) chance() float64 {
// All addresses that meet these criteria are assumed to be worthless and not
// worth keeping hold of.
func (ka *KnownAddress) isBad() bool {
-	if ka.lastAttempt.After(mstime.Now().Add(-1 * time.Minute)) {
+	if ka.lastattempt.After(time.Now().Add(-1 * time.Minute)) {
		return false
	}

	// From the future?
-	if ka.netAddress.Timestamp.After(mstime.Now().Add(10 * time.Minute)) {
+	if ka.na.Timestamp.After(time.Now().Add(10 * time.Minute)) {
		return true
	}

	// Over a month old?
-	if ka.netAddress.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
+	if ka.na.Timestamp.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
		return true
	}

	// Never succeeded?
-	if ka.lastSuccess.IsZero() && ka.attempts >= numRetries {
+	if ka.lastsuccess.IsZero() && ka.attempts >= numRetries {
		return true
	}

	// Hasn't succeeded in too long?
-	if !ka.lastSuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) &&
+	if !ka.lastsuccess.After(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
		ka.attempts >= maxFailures {
		return true
	}
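
The diff shows only the head of chance(); the hunk skips its body. The expectations in TestChance (both the mstime and time variants in this page) pin the behavior down anyway: 1.0 in the normal case, a 0.01 factor when the last attempt is under ten minutes old (including the clamped negative case), and a division by 1.5 per failed attempt. A sketch consistent with those expectations, with the constants read off the tests rather than the hidden source:

	package main

	import (
		"fmt"
		"time"
	)

	// chanceSketch reproduces the selection probability implied by TestChance.
	func chanceSketch(attempts int, lastAttempt time.Duration) float64 {
		// Attempts recorded "in the future" are clamped to zero, which is
		// why TestChance expects 1.0 * .01 for that case.
		if lastAttempt < 0 {
			lastAttempt = 0
		}

		c := 1.0

		// Very recently attempted addresses are strongly deprioritised.
		if lastAttempt < 10*time.Minute {
			c *= 0.01
		}

		// Each failed attempt divides the chance by 1.5.
		for i := attempts; i > 0; i-- {
			c /= 1.5
		}
		return c
	}

	func main() {
		fmt.Println(chanceSketch(0, 30*time.Minute)) // 1 (normal case)
		fmt.Println(chanceSketch(0, 5*time.Minute))  // 0.01 (recent attempt)
		fmt.Println(chanceSketch(2, 30*time.Minute)) // 0.444... (1 / 1.5 / 1.5)
	}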
addrmgr/knownaddress_test.go (new file, 114 lines)
@@ -0,0 +1,114 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr_test

import (
	"math"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/addrmgr"
	"github.com/kaspanet/kaspad/wire"
)

func TestChance(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	var tests = []struct {
		addr     *addrmgr.KnownAddress
		expected float64
	}{
		{
			//Test normal case
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			//Test case in which lastseen < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			//Test case in which lastattempt < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			//Test case in which lastattempt < ten minutes
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			//Test case with several failed attempts.
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1 / 1.5 / 1.5,
		},
	}

	err := .0001
	for i, test := range tests {
		chance := addrmgr.TstKnownAddressChance(test.addr)
		if math.Abs(test.expected-chance) >= err {
			t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
		}
	}
}

func TestIsBad(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	future := now.Add(35 * time.Minute)
	monthOld := now.Add(-43 * time.Hour * 24)
	secondsOld := now.Add(-2 * time.Second)
	minutesOld := now.Add(-27 * time.Minute)
	hoursOld := now.Add(-5 * time.Hour)
	zeroTime := time.Time{}

	futureNa := &wire.NetAddress{Timestamp: future}
	minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
	monthOldNa := &wire.NetAddress{Timestamp: monthOld}
	currentNa := &wire.NetAddress{Timestamp: secondsOld}

	//Test addresses that have been tried in the last minute.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
		t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
		t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
	}

	//Test address that claims to be from the future.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 6: addresses that claim to be from the future are bad.")
	}

	//Test address that has not been seen in over a month.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 7: addresses more than a month old are bad.")
	}

	//It has failed at least three times and never succeeded.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
		t.Errorf("test case 8: addresses that have never succeeded are bad.")
	}

	//It has failed ten times in the last week
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
		t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
	}

	//Test an address that should work.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 10: This should be a valid address.")
	}
}
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package addressmanager
|
||||
package addrmgr
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/logger"
|
||||
@@ -10,4 +10,4 @@ import (
|
||||
)
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
|
||||
@@ -2,12 +2,14 @@
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package addressmanager
|
||||
package addrmgr
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -86,19 +88,19 @@ func ipNet(ip string, ones, bits int) net.IPNet {
}

// IsIPv4 returns whether or not the given address is an IPv4 address.
func IsIPv4(na *domainmessage.NetAddress) bool {
func IsIPv4(na *wire.NetAddress) bool {
    return na.IP.To4() != nil
}

// IsLocal returns whether or not the given address is a local address.
func IsLocal(na *domainmessage.NetAddress) bool {
func IsLocal(na *wire.NetAddress) bool {
    return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
}

// IsRFC1918 returns whether or not the passed address is part of the IPv4
// private network address space as defined by RFC1918 (10.0.0.0/8,
// 172.16.0.0/12, or 192.168.0.0/16).
func IsRFC1918(na *domainmessage.NetAddress) bool {
func IsRFC1918(na *wire.NetAddress) bool {
    for _, rfc := range rfc1918Nets {
        if rfc.Contains(na.IP) {
            return true
@@ -109,56 +111,56 @@ func IsRFC1918(na *domainmessage.NetAddress) bool {

// IsRFC2544 returns whether or not the passed address is part of the IPv4
// address space as defined by RFC2544 (198.18.0.0/15)
func IsRFC2544(na *domainmessage.NetAddress) bool {
func IsRFC2544(na *wire.NetAddress) bool {
    return rfc2544Net.Contains(na.IP)
}

// IsRFC3849 returns whether or not the passed address is part of the IPv6
// documentation range as defined by RFC3849 (2001:DB8::/32).
func IsRFC3849(na *domainmessage.NetAddress) bool {
func IsRFC3849(na *wire.NetAddress) bool {
    return rfc3849Net.Contains(na.IP)
}

// IsRFC3927 returns whether or not the passed address is part of the IPv4
// autoconfiguration range as defined by RFC3927 (169.254.0.0/16).
func IsRFC3927(na *domainmessage.NetAddress) bool {
func IsRFC3927(na *wire.NetAddress) bool {
    return rfc3927Net.Contains(na.IP)
}

// IsRFC3964 returns whether or not the passed address is part of the IPv6 to
// IPv4 encapsulation range as defined by RFC3964 (2002::/16).
func IsRFC3964(na *domainmessage.NetAddress) bool {
func IsRFC3964(na *wire.NetAddress) bool {
    return rfc3964Net.Contains(na.IP)
}

// IsRFC4193 returns whether or not the passed address is part of the IPv6
// unique local range as defined by RFC4193 (FC00::/7).
func IsRFC4193(na *domainmessage.NetAddress) bool {
func IsRFC4193(na *wire.NetAddress) bool {
    return rfc4193Net.Contains(na.IP)
}

// IsRFC4380 returns whether or not the passed address is part of the IPv6
// teredo tunneling over UDP range as defined by RFC4380 (2001::/32).
func IsRFC4380(na *domainmessage.NetAddress) bool {
func IsRFC4380(na *wire.NetAddress) bool {
    return rfc4380Net.Contains(na.IP)
}

// IsRFC4843 returns whether or not the passed address is part of the IPv6
// ORCHID range as defined by RFC4843 (2001:10::/28).
func IsRFC4843(na *domainmessage.NetAddress) bool {
func IsRFC4843(na *wire.NetAddress) bool {
    return rfc4843Net.Contains(na.IP)
}

// IsRFC4862 returns whether or not the passed address is part of the IPv6
// stateless address autoconfiguration range as defined by RFC4862 (FE80::/64).
func IsRFC4862(na *domainmessage.NetAddress) bool {
func IsRFC4862(na *wire.NetAddress) bool {
    return rfc4862Net.Contains(na.IP)
}

// IsRFC5737 returns whether or not the passed address is part of the IPv4
// documentation address space as defined by RFC5737 (192.0.2.0/24,
// 198.51.100.0/24, 203.0.113.0/24)
func IsRFC5737(na *domainmessage.NetAddress) bool {
func IsRFC5737(na *wire.NetAddress) bool {
    for _, rfc := range rfc5737Net {
        if rfc.Contains(na.IP) {
            return true
@@ -170,19 +172,19 @@ func IsRFC5737(na *domainmessage.NetAddress) bool {

// IsRFC6052 returns whether or not the passed address is part of the IPv6
// well-known prefix range as defined by RFC6052 (64:FF9B::/96).
func IsRFC6052(na *domainmessage.NetAddress) bool {
func IsRFC6052(na *wire.NetAddress) bool {
    return rfc6052Net.Contains(na.IP)
}

// IsRFC6145 returns whether or not the passed address is part of the IPv6 to
// IPv4 translated address range as defined by RFC6145 (::FFFF:0:0:0/96).
func IsRFC6145(na *domainmessage.NetAddress) bool {
func IsRFC6145(na *wire.NetAddress) bool {
    return rfc6145Net.Contains(na.IP)
}

// IsRFC6598 returns whether or not the passed address is part of the IPv4
// shared address space specified by RFC6598 (100.64.0.0/10)
func IsRFC6598(na *domainmessage.NetAddress) bool {
func IsRFC6598(na *wire.NetAddress) bool {
    return rfc6598Net.Contains(na.IP)
}

@@ -190,7 +192,7 @@ func IsRFC6598(na *domainmessage.NetAddress) bool {
// considered invalid under the following circumstances:
// IPv4: It is either a zero or all bits set address.
// IPv6: It is either a zero or RFC3849 documentation address.
func IsValid(na *domainmessage.NetAddress) bool {
func IsValid(na *wire.NetAddress) bool {
    // IsUnspecified returns if address is 0, so only all bits set, and
    // RFC3849 need to be explicitly checked.
    return na.IP != nil && !(na.IP.IsUnspecified() ||
@@ -200,8 +202,8 @@ func IsValid(na *domainmessage.NetAddress) bool {
// IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func (am *AddressManager) IsRoutable(na *domainmessage.NetAddress) bool {
    if am.cfg.NetParams().AcceptUnroutable {
func IsRoutable(na *wire.NetAddress) bool {
    if config.ActiveConfig().NetParams().AcceptUnroutable {
        return !IsLocal(na)
    }

@@ -215,11 +217,11 @@ func (am *AddressManager) IsRoutable(na *domainmessage.NetAddress) bool {
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, and the string "unroutable" for an unroutable
// address.
func (am *AddressManager) GroupKey(na *domainmessage.NetAddress) string {
func GroupKey(na *wire.NetAddress) string {
    if IsLocal(na) {
        return "local"
    }
    if !am.IsRoutable(na) {
    if !IsRoutable(na) {
        return "unroutable"
    }
    if IsIPv4(na) {
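For reference, a standalone sketch of the /16 grouping that GroupKey's doc comment describes for routable IPv4 addresses; the helper name is hypothetical and this is not the kaspad implementation:

```go
// Illustrative sketch of /16 grouping for routable IPv4 addresses.
package sketch

import "net"

// groupKeyIPv4 masks an IPv4 address to its /16 prefix, so all addresses in
// the same /16 share one key (173.194.115.66 maps to "173.194.0.0").
func groupKeyIPv4(ip net.IP) string {
	ip4 := ip.To4()
	if ip4 == nil {
		return "unroutable"
	}
	return ip4.Mask(net.CIDRMask(16, 32)).String()
}
```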
app/app.go
@@ -1,249 +0,0 @@
package app

import (
    "fmt"
    "sync/atomic"

    "github.com/kaspanet/kaspad/addressmanager"

    "github.com/kaspanet/kaspad/netadapter/id"

    "github.com/kaspanet/kaspad/blockdag"
    "github.com/kaspanet/kaspad/blockdag/indexers"
    "github.com/kaspanet/kaspad/config"
    "github.com/kaspanet/kaspad/connmanager"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/dnsseed"
    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/mempool"
    "github.com/kaspanet/kaspad/mining"
    "github.com/kaspanet/kaspad/netadapter"
    "github.com/kaspanet/kaspad/protocol"
    "github.com/kaspanet/kaspad/rpc"
    "github.com/kaspanet/kaspad/signal"
    "github.com/kaspanet/kaspad/txscript"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/panics"
)

// App is a wrapper for all the kaspad services
type App struct {
    cfg               *config.Config
    rpcServer         *rpc.Server
    addressManager    *addressmanager.AddressManager
    protocolManager   *protocol.Manager
    connectionManager *connmanager.ConnectionManager
    netAdapter        *netadapter.NetAdapter

    started, shutdown int32
}

// Start launches all the kaspad services.
func (a *App) Start() {
    // Already started?
    if atomic.AddInt32(&a.started, 1) != 1 {
        return
    }

    log.Trace("Starting kaspad")

    err := a.protocolManager.Start()
    if err != nil {
        panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
    }

    a.maybeSeedFromDNS()

    a.connectionManager.Start()

    if !a.cfg.DisableRPC {
        a.rpcServer.Start()
    }
}

// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() error {
    // Make sure this only happens once.
    if atomic.AddInt32(&a.shutdown, 1) != 1 {
        log.Infof("Kaspad is already in the process of shutting down")
        return nil
    }

    log.Warnf("Kaspad shutting down")

    a.connectionManager.Stop()

    err := a.protocolManager.Stop()
    if err != nil {
        log.Errorf("Error stopping the p2p protocol: %+v", err)
    }

    // Shutdown the RPC server if it's not disabled.
    if !a.cfg.DisableRPC {
        err := a.rpcServer.Stop()
        if err != nil {
            log.Errorf("Error stopping rpcServer: %+v", err)
        }
    }

    return nil
}

// New returns a new App instance configured to listen on addr for the
// kaspa network type specified by dagParams. Use start to begin accepting
// connections from peers.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
    indexManager, acceptanceIndex := setupIndexes(cfg)

    sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)

    // Create a new block DAG instance with the appropriate configuration.
    dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
    if err != nil {
        return nil, err
    }

    txMempool := setupMempool(cfg, dag, sigCache)

    netAdapter, err := netadapter.NewNetAdapter(cfg)
    if err != nil {
        return nil, err
    }
    addressManager := addressmanager.New(cfg, databaseContext)

    connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
    if err != nil {
        return nil, err
    }

    protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
    if err != nil {
        return nil, err
    }
    rpcServer, err := setupRPC(
        cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
    if err != nil {
        return nil, err
    }

    return &App{
        cfg:               cfg,
        rpcServer:         rpcServer,
        protocolManager:   protocolManager,
        connectionManager: connectionManager,
        netAdapter:        netAdapter,
        addressManager:    addressManager,
    }, nil
}

func (a *App) maybeSeedFromDNS() {
    if !a.cfg.DisableDNSSeed {
        dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, domainmessage.SFNodeNetwork, false, nil,
            a.cfg.Lookup, func(addresses []*domainmessage.NetAddress) {
                // Kaspad uses a lookup of the dns seeder here. Since the seeder
                // returns IPs of nodes and not its own IP, we cannot know the
                // real IP of the source, so we take the first returned address
                // as the source.
                a.addressManager.AddAddresses(addresses, addresses[0], nil)
            })
    }
}

func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
    sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {

    dag, err := blockdag.New(&blockdag.Config{
        Interrupt:       interrupt,
        DatabaseContext: databaseContext,
        DAGParams:       cfg.NetParams(),
        TimeSource:      blockdag.NewTimeSource(),
        SigCache:        sigCache,
        IndexManager:    indexManager,
        SubnetworkID:    cfg.SubnetworkID,
    })
    return dag, err
}

func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
    // Create indexes if needed.
    var indexes []indexers.Indexer
    var acceptanceIndex *indexers.AcceptanceIndex
    if cfg.AcceptanceIndex {
        log.Info("acceptance index is enabled")
        acceptanceIndex = indexers.NewAcceptanceIndex()
        indexes = append(indexes, acceptanceIndex)
    }

    // Create an index manager if any of the optional indexes are enabled.
    if len(indexes) == 0 {
        return nil, nil
    }
    indexManager := indexers.NewManager(indexes)
    return indexManager, acceptanceIndex
}

func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
    mempoolConfig := mempool.Config{
        Policy: mempool.Policy{
            AcceptNonStd:    cfg.RelayNonStd,
            MaxOrphanTxs:    cfg.MaxOrphanTxs,
            MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
            MinRelayTxFee:   cfg.MinRelayTxFee,
            MaxTxVersion:    1,
        },
        CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
            return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
        },
        IsDeploymentActive: dag.IsDeploymentActive,
        SigCache:           sigCache,
        DAG:                dag,
    }

    return mempool.New(&mempoolConfig)
}

func setupRPC(cfg *config.Config,
    dag *blockdag.BlockDAG,
    txMempool *mempool.TxPool,
    sigCache *txscript.SigCache,
    acceptanceIndex *indexers.AcceptanceIndex,
    connectionManager *connmanager.ConnectionManager,
    addressManager *addressmanager.AddressManager,
    protocolManager *protocol.Manager) (*rpc.Server, error) {

    if !cfg.DisableRPC {
        policy := mining.Policy{
            BlockMaxMass: cfg.BlockMaxMass,
        }
        blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)

        rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
            connectionManager, addressManager, protocolManager)
        if err != nil {
            return nil, err
        }

        // Signal process shutdown when the RPC server requests it.
        spawn("setupRPC-handleShutdownRequest", func() {
            <-rpcServer.RequestedProcessShutdown()
            signal.ShutdownRequestChannel <- struct{}{}
        })

        return rpcServer, nil
    }
    return nil, nil
}

// P2PNodeID returns the network ID associated with this App
func (a *App) P2PNodeID() *id.ID {
    return a.netAdapter.ID()
}

// AddressManager returns the AddressManager associated with this App
func (a *App) AddressManager() *addressmanager.AddressManager {
    return a.addressManager
}

// WaitForShutdown blocks until the main listener and peer handlers are stopped.
func (a *App) WaitForShutdown() {
    // TODO(libp2p)
    // a.p2pServer.WaitForShutdown()
}
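Both Start and Stop above rely on an atomic counter so they run their bodies at most once. A self-contained sketch of that guard pattern:

```go
// Start-once guard in the style App.Start/App.Stop use: the first caller
// flips the counter from 0 to 1 and proceeds; later callers observe a
// value greater than 1 and return immediately.
package sketch

import (
	"fmt"
	"sync/atomic"
)

type service struct {
	started int32
}

func (s *service) Start() {
	if atomic.AddInt32(&s.started, 1) != 1 {
		return // already started
	}
	fmt.Println("starting services")
}
```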
app/log.go
@@ -1,14 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package app

import (
    "github.com/kaspanet/kaspad/logger"
    "github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,7 +1,7 @@
blockchain
==========

[](https://choosealicense.com/licenses/isc/)
[](http://copyfree.org)
[](http://godoc.org/github.com/kaspanet/kaspad/blockchain)

Package blockdag implements Kaspa block handling, organization of the blockDAG,
@@ -6,28 +6,16 @@ package blockdag

import (
    "fmt"

    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util"
    "github.com/pkg/errors"
)

func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
    blockHeader := &block.MsgBlock().Header
    newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
    newNode, _ := dag.newBlockNode(blockHeader, newSet())
    newNode.status = statusInvalidAncestor
    dag.index.AddNode(newNode)

    dbTx, err := dag.databaseContext.NewTx()
    if err != nil {
        return err
    }
    defer dbTx.RollbackUnlessClosed()
    err = dag.index.flushToDB(dbTx)
    if err != nil {
        return err
    }
    return dbTx.Commit()
    return dag.index.flushToDB()
}

// maybeAcceptBlock potentially accepts a block into the block DAG. It
@@ -42,8 +30,7 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
    parents, err := lookupParentNodes(block, dag)
    if err != nil {
        var ruleErr RuleError
        if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
        if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
            err := dag.addNodeToIndexWithInvalidAncestor(block)
            if err != nil {
                return err
@@ -73,26 +60,13 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
    // expensive connection logic. It also has some other nice properties
    // such as making blocks that never become part of the DAG or
    // blocks that fail to connect available for further analysis.
    dbTx, err := dag.databaseContext.NewTx()
    if err != nil {
        return err
    }
    defer dbTx.RollbackUnlessClosed()
    blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
    if err != nil {
        return err
    }
    if !blockExists {
        err := storeBlock(dbTx, block)
    err = dag.db.Update(func(dbTx database.Tx) error {
        err := dbStoreBlock(dbTx, block)
        if err != nil {
            return err
        }
    }
    err = dag.index.flushToDB(dbTx)
    if err != nil {
        return err
    }
    err = dbTx.Commit()
        return dag.index.flushToDBWithTx(dbTx)
    })
    if err != nil {
        return err
    }
@@ -106,6 +80,8 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
    }
    }

    block.SetBlueScore(newNode.blueScore)

    // Connect the passed block to the DAG. This also handles validation of the
    // transaction scripts.
    chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
@@ -132,18 +108,18 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
    return nil
}

func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) {
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
    header := block.MsgBlock().Header
    parentHashes := header.ParentHashes

    nodes := newBlockSet()
    nodes := newSet()
    for _, parentHash := range parentHashes {
        node, ok := dag.index.LookupNode(parentHash)
        if !ok {
            str := fmt.Sprintf("parent block %s is unknown", parentHash)
        node := blockDAG.index.LookupNode(parentHash)
        if node == nil {
            str := fmt.Sprintf("parent block %s is unknown", parentHashes)
            return nil, ruleError(ErrParentBlockUnknown, str)
        } else if dag.index.NodeStatus(node).KnownInvalid() {
            str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
        } else if blockDAG.index.NodeStatus(node).KnownInvalid() {
            str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
            return nil, ruleError(ErrInvalidAncestorBlock, str)
        }
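The hunks above move between errors.As and plain type assertions on RuleError. A self-contained illustration of why the two differ once errors are wrapped (RuleError here is a stand-in for the blockdag type):

```go
// Demonstrates that a plain type assertion misses wrapped errors while
// errors.As unwraps the chain until it finds a match.
package main

import (
	"errors"
	"fmt"
)

type RuleError struct{ ErrorCode int }

func (e RuleError) Error() string { return fmt.Sprintf("rule error %d", e.ErrorCode) }

func main() {
	wrapped := fmt.Errorf("context: %w", RuleError{ErrorCode: 7})

	// A plain type assertion only matches the outermost error value.
	if _, ok := wrapped.(RuleError); !ok {
		fmt.Println("type assertion misses the wrapped RuleError")
	}

	// errors.As walks the wrap chain and fills in the target on a match.
	var ruleErr RuleError
	if errors.As(wrapped, &ruleErr) {
		fmt.Println("errors.As finds code", ruleErr.ErrorCode)
	}
}
```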
@@ -1,7 +1,6 @@
package blockdag

import (
    "errors"
    "path/filepath"
    "testing"

@@ -10,7 +9,7 @@ import (

func TestMaybeAcceptBlockErrors(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
    dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
@@ -34,8 +33,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
            "Expected: %s, got: <nil>", ErrParentBlockUnknown)
    }
    var ruleErr RuleError
    if ok := errors.As(err, &ruleErr); !ok {
    ruleErr, ok := err.(RuleError)
    if !ok {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
            "Expected RuleError but got %s", err)
    } else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -63,10 +62,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
    if isOrphan {
        t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
    }
    blockNode1, ok := dag.index.LookupNode(block1.Hash())
    if !ok {
        t.Fatalf("block %s does not exist in the DAG", block1.Hash())
    }
    blockNode1 := dag.index.LookupNode(block1.Hash())
    dag.index.SetStatusFlags(blockNode1, statusValidateFailed)

    block2 := blocks[2]
@@ -75,7 +71,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
            "Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
    }
    if ok := errors.As(err, &ruleErr); !ok {
    ruleErr, ok = err.(RuleError)
    if !ok {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
            "Expected RuleError but got %s", err)
    } else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -94,7 +91,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
            "Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
    }
    if ok := errors.As(err, &ruleErr); !ok {
    ruleErr, ok = err.(RuleError)
    if !ok {
        t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
            "Expected RuleError but got %s", err)
    } else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {

@@ -10,21 +10,21 @@ import (
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
        DAGParams: &dagconfig.MainnetParams,
    })
    if err != nil {
        t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
    }
    defer teardownFunc()

    block0Header := dagconfig.SimnetParams.GenesisBlock.Header
    block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
    block0Header := dagconfig.MainnetParams.GenesisBlock.Header
    block0, _ := dag.newBlockNode(&block0Header, newSet())

    block100000Header := Block100000.Header
    block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))
    block100000, _ := dag.newBlockNode(&block100000Header, setFromSlice(block0))

    block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
    block0smallHash, _ := dag.newBlockNode(&block0Header, newSet())
    block0smallHash.hash = &daghash.Hash{}

    tests := []struct {

blockdag/blockidhash.go (new file)
@@ -0,0 +1,136 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/pkg/errors"
)

var (
    // idByHashIndexBucketName is the name of the db bucket used to house
    // the block hash -> block id index.
    idByHashIndexBucketName = []byte("idbyhashidx")

    // hashByIDIndexBucketName is the name of the db bucket used to house
    // the block id -> block hash index.
    hashByIDIndexBucketName = []byte("hashbyididx")

    currentBlockIDKey = []byte("currentblockid")
)

// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of the block
// hash for the indexers. This is useful because it is only 8 bytes versus 32-byte
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, to initiate a recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
//   <hash> = <ID>
//
//   Field   Type          Size
//   hash    daghash.Hash  32 bytes
//   ID      uint64        8 bytes
//   -----
//   Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
//   <ID> = <hash>
//
//   Field   Type          Size
//   ID      uint64        8 bytes
//   hash    daghash.Hash  32 bytes
//   -----
//   Total: 40 bytes
//
// -----------------------------------------------------------------------------

const blockIDSize = 8 // 8 bytes for block ID

// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
    hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
    serializedID := hashIndex.Get(hash[:])
    if serializedID == nil {
        return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
    }

    return DeserializeBlockID(serializedID), nil
}

// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
    idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
    hashBytes := idIndex.Get(serializedID)
    if hashBytes == nil {
        return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
    }

    var hash daghash.Hash
    copy(hash[:], hashBytes)
    return &hash, nil
}

// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
    // Add the block hash to ID mapping to the index.
    meta := dbTx.Metadata()
    hashIndex := meta.Bucket(idByHashIndexBucketName)
    if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
        return err
    }

    // Add the block ID to hash mapping to the index.
    idIndex := meta.Bucket(hashByIDIndexBucketName)
    return idIndex.Put(serializedID[:], hash[:])
}

// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
    serializedID := dbTx.Metadata().Get(currentBlockIDKey)
    if serializedID == nil {
        return 0
    }
    return DeserializeBlockID(serializedID)
}

// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
    return byteOrder.Uint64(serializedID)
}

// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
    serializedBlockID := make([]byte, blockIDSize)
    byteOrder.PutUint64(serializedBlockID, blockID)
    return serializedBlockID
}

// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
    return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}

func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
    currentBlockID := DBFetchCurrentBlockID(dbTx)
    newBlockID := currentBlockID + 1
    serializedNewBlockID := SerializeBlockID(newBlockID)
    err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
    if err != nil {
        return 0, err
    }
    err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
    if err != nil {
        return 0, err
    }
    return newBlockID, nil
}
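A standalone round trip of the 8-byte block ID encoding described above. Only the 8-byte width is given by the source; the package's byteOrder being little-endian is an assumption here:

```go
// Round-trips a uint64 block ID through its fixed 8-byte encoding. Each
// index entry pairs a 32-byte hash with this 8-byte value: 40 bytes total.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var byteOrder binary.ByteOrder = binary.LittleEndian // assumed

	blockID := uint64(123456)
	serialized := make([]byte, 8) // blockIDSize
	byteOrder.PutUint64(serialized, blockID)

	fmt.Println(len(serialized), byteOrder.Uint64(serialized) == blockID) // 8 true
}
```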
@@ -5,10 +5,10 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dbaccess"
    "sync"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util/daghash"
)

@@ -18,6 +18,7 @@ type blockIndex struct {
    // The following fields are set when the instance is created and can't
    // be changed afterwards, so there is no need to protect them with a
    // separate mutex.
    db        database.DB
    dagParams *dagconfig.Params

    sync.RWMutex
@@ -28,8 +29,9 @@ type blockIndex struct {
// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
    return &blockIndex{
        db:        db,
        dagParams: dagParams,
        index:     make(map[daghash.Hash]*blockNode),
        dirty:     make(map[*blockNode]struct{}),
@@ -41,8 +43,8 @@ func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
    bi.RLock()
    defer bi.RUnlock()
    _, hasBlock := bi.index[*hash]
    bi.RUnlock()
    return hasBlock
}

@@ -50,11 +52,11 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
// return nil if there is no entry for the hash.
//
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
    bi.RLock()
    defer bi.RUnlock()
    node, ok := bi.index[*hash]
    return node, ok
    node := bi.index[*hash]
    bi.RUnlock()
    return node
}

// AddNode adds the provided node to the block index and marks it as dirty.
@@ -63,9 +65,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
    bi.Lock()
    defer bi.Unlock()
    bi.addNode(node)
    bi.dirty[node] = struct{}{}
    bi.Unlock()
}

// addNode adds the provided node to the block index, but does not mark it as
@@ -81,8 +83,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
    bi.RLock()
    defer bi.RUnlock()
    status := node.status
    bi.RUnlock()
    return status
}

@@ -93,9 +95,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
    bi.Lock()
    defer bi.Unlock()
    node.status |= flags
    bi.dirty[node] = struct{}{}
    bi.Unlock()
}

// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -104,13 +106,22 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
    bi.Lock()
    defer bi.Unlock()
    node.status &^= flags
    bi.dirty[node] = struct{}{}
    bi.Unlock()
}

// flushToDB writes all dirty block nodes to the database.
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
    return bi.db.Update(func(dbTx database.Tx) error {
        return bi.flushToDBWithTx(dbTx)
    })
}

// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
    bi.Lock()
    defer bi.Unlock()
    if len(bi.dirty) == 0 {
@@ -118,19 +129,13 @@ func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
    }

    for node := range bi.dirty {
        serializedBlockNode, err := serializeBlockNode(node)
        if err != nil {
            return err
        }
        key := blockIndexKey(node.hash, node.blueScore)
        err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
        err := dbStoreBlockNode(dbTx, node)
        if err != nil {
            return err
        }
    }

    // If the write was successful, clear the dirty set.
    bi.dirty = make(map[*blockNode]struct{})
    return nil
}

func (bi *blockIndex) clearDirtyEntries() {
    bi.dirty = make(map[*blockNode]struct{})
}
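blockIndex above mutates nodes in memory, tracks them in a dirty set, and persists only those entries on flush. A minimal sketch of that pattern, independent of the kaspad types:

```go
// Dirty-set flush pattern: mutate in memory, remember what changed, and
// persist only the changed entries; clear the set only on full success.
package sketch

import "sync"

type index struct {
	mu    sync.Mutex
	items map[string]int
	dirty map[string]struct{}
}

func (i *index) set(key string, value int) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.items[key] = value
	i.dirty[key] = struct{}{} // mark for the next flush
}

func (i *index) flush(store func(string, int) error) error {
	i.mu.Lock()
	defer i.mu.Unlock()
	for key := range i.dirty {
		if err := store(key, i.items[key]); err != nil {
			return err // keep the dirty set so a retry re-writes everything
		}
	}
	i.dirty = make(map[string]struct{}) // all writes succeeded
	return nil
}
```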
@@ -1,15 +1,16 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/mstime"
    "testing"
    "time"

    "github.com/kaspanet/kaspad/dagconfig"
)

func TestAncestorErrors(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    params := dagconfig.SimnetParams
    dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
    dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
        DAGParams: &params,
    })
    if err != nil {
@@ -17,7 +18,7 @@ func TestAncestorErrors(t *testing.T) {
    }
    defer teardownFunc()

    node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, mstime.Now())
    node := newTestNode(dag, newSet(), int32(0x10000000), 0, time.Unix(0, 0))
    node.blueScore = 2
    ancestor := node.SelectedAncestor(3)
    if ancestor != nil {

@@ -29,15 +29,8 @@ func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (Bl
    dag.dagLock.RLock()
    defer dag.dagLock.RUnlock()

    highNode, ok := dag.index.LookupNode(highHash)
    if !ok {
        return nil, errors.Errorf("block %s is unknown", highHash)
    }

    lowNode, ok := dag.index.LookupNode(lowHash)
    if !ok {
        return nil, errors.Errorf("block %s is unknown", lowHash)
    }
    highNode := dag.index.LookupNode(highHash)
    lowNode := dag.index.LookupNode(lowHash)

    return dag.blockLocator(highNode, lowNode)
}
@@ -95,8 +88,8 @@ func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash,
    lowNode := dag.genesis
    nextBlockLocatorIndex := int64(len(locator) - 1)
    for i, hash := range locator {
        node, ok := dag.index.LookupNode(hash)
        if ok {
        node := dag.index.LookupNode(hash)
        if node != nil {
            lowNode = node
            nextBlockLocatorIndex = int64(i) - 1
            break

@@ -6,14 +6,13 @@ package blockdag

import (
    "fmt"
    "math"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/mstime"
    "github.com/pkg/errors"
    "math"
    "time"

    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
)

// blockStatus is a bit field representing the validation state of the block.
@@ -32,6 +31,11 @@ const (
    // statusInvalidAncestor indicates that one of the block's ancestors
    // has failed validation, thus the block is also invalid.
    statusInvalidAncestor

    // statusNone indicates that the block has no validation state flags set.
    //
    // NOTE: This must be defined last in order to avoid influencing iota.
    statusNone blockStatus = 0
)

// KnownValid returns whether the block is known to be valid. This will return
@@ -76,7 +80,7 @@ type blockNode struct {

    // bluesAnticoneSizes is a map holding the set of blues affected by this block and their
    // modified blue anticone size.
    bluesAnticoneSizes map[*blockNode]dagconfig.KType
    bluesAnticoneSizes map[daghash.Hash]dagconfig.KType

    // hash is the double sha 256 of the block.
    hash *daghash.Hash
@@ -106,13 +110,13 @@ type blockNode struct {
// anticone of its selected parent (parent with highest blue score).
// selectedParentAnticone is used to update reachability data we store for future reachability queries.
// This function is NOT safe for concurrent access.
func (dag *BlockDAG) newBlockNode(blockHeader *domainmessage.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
    node = &blockNode{
        parents:            parents,
        children:           make(blockSet),
        blueScore:          math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
        timestamp:          dag.Now().UnixMilliseconds(),
        bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
        timestamp:          dag.AdjustedTime().Unix(),
        bluesAnticoneSizes: make(map[daghash.Hash]dagconfig.KType),
    }

    // blockHeader is nil only for the virtual block
@@ -121,7 +125,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *domainmessage.BlockHeader, parent
    node.version = blockHeader.Version
    node.bits = blockHeader.Bits
    node.nonce = blockHeader.Nonce
    node.timestamp = blockHeader.Timestamp.UnixMilliseconds()
    node.timestamp = blockHeader.Timestamp.Unix()
    node.hashMerkleRoot = blockHeader.HashMerkleRoot
    node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
    node.utxoCommitment = blockHeader.UTXOCommitment
@@ -160,15 +164,15 @@ func (node *blockNode) less(other *blockNode) bool {
// Header constructs a block header from the node and returns it.
//
// This function is safe for concurrent access.
func (node *blockNode) Header() *domainmessage.BlockHeader {
func (node *blockNode) Header() *wire.BlockHeader {
    // No lock is needed because all accessed fields are immutable.
    return &domainmessage.BlockHeader{
    return &wire.BlockHeader{
        Version:              node.version,
        ParentHashes:         node.ParentHashes(),
        HashMerkleRoot:       node.hashMerkleRoot,
        AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
        UTXOCommitment:       node.utxoCommitment,
        Timestamp:            node.time(),
        Timestamp:            time.Unix(node.timestamp, 0),
        Bits:                 node.bits,
        Nonce:                node.nonce,
    }
@@ -180,7 +184,7 @@ func (node *blockNode) Header() *domainmessage.BlockHeader {
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
    if blueScore > node.blueScore {
    if blueScore < 0 || blueScore > node.blueScore {
        return nil
    }

@@ -205,13 +209,13 @@ func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
    window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
    medianTimestamp, err := window.medianTimestamp()
    if err != nil {
        panic(fmt.Sprintf("blueBlockWindow: %s", err))
    }
    return mstime.UnixMilliseconds(medianTimestamp)
    return time.Unix(medianTimestamp, 0)
}

func (node *blockNode) ParentHashes() []*daghash.Hash {
@@ -224,14 +228,10 @@ func (node *blockNode) isGenesis() bool {
}

func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
    return node.blueScore / uint64(dag.FinalityInterval())
    return node.blueScore / uint64(dag.dagParams.FinalityInterval)
}

// String returns a string that contains the block hash.
func (node blockNode) String() string {
    return node.hash.String()
}

func (node *blockNode) time() mstime.Time {
    return mstime.UnixMilliseconds(node.timestamp)
}
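PastMedianTime above takes the median timestamp of a blue-block window. A worked standalone illustration of that median step (not the kaspad implementation):

```go
// Computes the median of a window of block timestamps: sort a copy and
// take the middle element.
package main

import (
	"fmt"
	"sort"
)

func medianTimestamp(timestamps []int64) int64 {
	sorted := append([]int64(nil), timestamps...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	// A window of 2*deviationTolerance-1 block timestamps (here, 5).
	window := []int64{1000, 1003, 999, 1002, 1001}
	fmt.Println(medianTimestamp(window)) // 1001
}
```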
@@ -9,7 +9,7 @@ import (
// This test ensures that bluesAnticoneSizes is serialized to the size of KType.
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
@@ -25,17 +25,16 @@ func TestBlueAnticoneSizesSize(t *testing.T) {
    }

    blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
    node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
    fakeBlue := &blockNode{hash: &daghash.Hash{1}}
    dag.index.AddNode(fakeBlue)
    node, _ := dag.newBlockNode(&blockHeader, newSet())
    hash := daghash.Hash{1}
    // Setting maxKType to maximum value of KType.
    // As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
    maxKType := ^dagconfig.KType(0)
    node.bluesAnticoneSizes[fakeBlue] = maxKType
    node.bluesAnticoneSizes[hash] = maxKType
    serializedNode, _ := serializeBlockNode(node)
    deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
    if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
    if deserializedNode.bluesAnticoneSizes[hash] != maxKType {
        t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
            maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
            maxKType, deserializedNode.bluesAnticoneSizes[hash])
    }
}

@@ -9,14 +9,14 @@ import (
// blockSet implements a basic unsorted set of blocks
type blockSet map[*blockNode]struct{}

// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
// newSet creates a new, empty BlockSet
func newSet() blockSet {
    return map[*blockNode]struct{}{}
}

// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as a map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
    set := newBlockSet()
// setFromSlice converts a slice of blockNodes into an unordered set represented as a map
func setFromSlice(nodes ...*blockNode) blockSet {
    set := newSet()
    for _, node := range nodes {
        set.add(node)
    }
@@ -36,7 +36,7 @@ func (bs blockSet) remove(node *blockNode) {

// clone clones the block set
func (bs blockSet) clone() blockSet {
    clone := newBlockSet()
    clone := newSet()
    for node := range bs {
        clone.add(node)
    }
@@ -45,7 +45,7 @@ func (bs blockSet) clone() blockSet {

// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
    diff := newBlockSet()
    diff := newSet()
    for node := range bs {
        if !other.contains(node) {
            diff.add(node)
@@ -102,6 +102,17 @@ func (bs blockSet) String() string {
    return strings.Join(nodeStrs, ",")
}

// anyChildInSet returns true iff any child of node is contained within this set
func (bs blockSet) anyChildInSet(node *blockNode) bool {
    for child := range node.children {
        if bs.contains(child) {
            return true
        }
    }

    return false
}

func (bs blockSet) bluest() *blockNode {
    var bluestNode *blockNode
    var maxScore uint64

@@ -8,7 +8,7 @@ import (
)

func TestHashes(t *testing.T) {
    bs := blockSetFromSlice(
    bs := setFromSlice(
        &blockNode{
            hash: &daghash.Hash{3},
        },
@@ -49,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
        },
        {
            name:           "subtract an empty set",
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "subtract from empty set",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(),
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(),
        },
        {
            name:           "subtract unrelated set",
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(node2),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(node1),
            setB:           setFromSlice(node2),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "typical case",
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1),
        },
    }

@@ -101,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
        },
        {
            name:           "add an empty set",
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "add to empty set",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "add already added member",
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
        },
    }

@@ -153,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
    }{
        {
            name:           "add empty slice to empty set",
            set:            blockSetFromSlice(),
            set:            setFromSlice(),
            slice:          []*blockNode{},
            expectedResult: blockSetFromSlice(),
            expectedResult: setFromSlice(),
        },
        {
            name:           "add an empty slice",
            set:            blockSetFromSlice(node1),
            set:            setFromSlice(node1),
            slice:          []*blockNode{},
            expectedResult: blockSetFromSlice(node1),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "add to empty set",
            set:            blockSetFromSlice(),
            set:            setFromSlice(),
            slice:          []*blockNode{node1},
            expectedResult: blockSetFromSlice(node1),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "add already added member",
            set:            blockSetFromSlice(node1, node2),
            set:            setFromSlice(node1, node2),
            slice:          []*blockNode{node1},
            expectedResult: blockSetFromSlice(node1, node2),
            expectedResult: setFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            set:            blockSetFromSlice(node1, node2),
            set:            setFromSlice(node1, node2),
            slice:          []*blockNode{node2, node3},
            expectedResult: blockSetFromSlice(node1, node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
        },
    }

@@ -205,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
        },
        {
            name:           "union against an empty set",
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "union from an empty set",
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
        },
        {
            name:           "union with subset",
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
        },
    }

@@ -2,7 +2,6 @@ package blockdag

import (
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/bigintpool"
    "github.com/pkg/errors"
    "math"
    "math/big"
@@ -54,19 +53,13 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
    return
}

func (window blockWindow) averageTarget(averageTarget *big.Int) {
    averageTarget.SetInt64(0)

    target := bigintpool.Acquire(0)
    defer bigintpool.Release(target)
func (window blockWindow) averageTarget() *big.Int {
    averageTarget := big.NewInt(0)
    for _, node := range window {
        util.CompactToBigWithDestination(node.bits, target)
        target := util.CompactToBig(node.bits)
        averageTarget.Add(averageTarget, target)
    }

    windowLen := bigintpool.Acquire(int64(len(window)))
    defer bigintpool.Release(windowLen)
    averageTarget.Div(averageTarget, windowLen)
    return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
}

func (window blockWindow) medianTimestamp() (int64, error) {
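The old side of the averageTarget hunk amortizes big.Int allocations through a bigintpool package. A sketch of how such a pool could be built on sync.Pool; the real bigintpool API is an assumption here:

```go
// Pooled big.Int allocation in the spirit of the bigintpool calls above.
package sketch

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{New: func() interface{} { return new(big.Int) }}

// acquire returns a pooled *big.Int set to x, avoiding a fresh allocation
// on the hot path.
func acquire(x int64) *big.Int {
	n := pool.Get().(*big.Int)
	n.SetInt64(x)
	return n
}

// release returns a big.Int to the pool for reuse.
func release(n *big.Int) {
	pool.Put(n)
}
```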
@@ -12,7 +12,7 @@ import (
func TestBlueBlockWindow(t *testing.T) {
    params := dagconfig.SimnetParams
    params.K = 1
    dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
    dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
        DAGParams: &params,
    })
    if err != nil {
@@ -51,14 +51,14 @@ func TestBlueBlockWindow(t *testing.T) {
            expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"D", "C"},
            parents: []string{"C", "D"},
            id:      "E",
            expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"D", "C"},
            parents: []string{"C", "D"},
            id:      "F",
            expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"A"},
@@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
        {
            parents: []string{"H", "F"},
            id:      "I",
            expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"I"},
            id:      "J",
            expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"J"},
            id:      "K",
            expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
        },
        {
            parents: []string{"K"},
            id:      "L",
            expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
        },
        {
            parents: []string{"L"},
            id:      "M",
            expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
            expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
        },
        {
            parents: []string{"M"},
            id:      "N",
            expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
            expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
        },
        {
            parents: []string{"N"},
            id:      "O",
            expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
            expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
        },
    }

@@ -133,10 +133,7 @@ func TestBlueBlockWindow(t *testing.T) {
        t.Fatalf("block %v was unexpectedly orphan", blockData.id)
    }

    node, ok := dag.index.LookupNode(utilBlock.Hash())
    if !ok {
        t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
    }
    node := dag.index.LookupNode(utilBlock.Hash())

    blockByIDMap[blockData.id] = node
    idByBlockMap[node] = blockData.id

@@ -4,16 +4,16 @@ import (
    "bufio"
    "bytes"
    "encoding/binary"
    "io"

    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/coinbasepayload"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "github.com/kaspanet/kaspad/util/txsort"
    "github.com/pkg/errors"
    "io"
    "math"

    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/txsort"
    "github.com/kaspanet/kaspad/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
@@ -73,24 +73,55 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
}

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (dag *BlockDAG) getBluesFeeData(node *blockNode) (map[daghash.Hash]compactFeeData, error) {
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
    bluesFeeData := make(map[daghash.Hash]compactFeeData)

    for _, blueBlock := range node.blues {
        feeData, err := dbaccess.FetchFeeData(dag.databaseContext, blueBlock.hash)
        if err != nil {
            return nil, err
    err := dag.db.View(func(dbTx database.Tx) error {
        for _, blueBlock := range node.blues {
            feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
            if err != nil {
                return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
            }

            bluesFeeData[*blueBlock.hash] = feeData
        }

        bluesFeeData[*blueBlock.hash] = feeData
        return nil
    })
    if err != nil {
        return nil, err
    }

    return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
    feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
    if err != nil {
        return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
    }

    return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
    feeBucket := dbTx.Metadata().Bucket(feeBucket)
    if feeBucket == nil {
        return nil, errors.New("Fee bucket does not exist")
    }

    feeData := feeBucket.Get(blockHash.CloneBytes())
    if feeData == nil {
        return nil, errors.Errorf("No fee data found for block %s", blockHash)
    }

    return feeData, nil
}

// The following functions deal with building and validating the coinbase transaction

func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
@@ -98,10 +129,7 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
        return nil
    }
    blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
    _, scriptPubKey, extraData, err := coinbasepayload.DeserializeCoinbasePayload(blockCoinbaseTx)
    if errors.Is(err, coinbasepayload.ErrIncorrectScriptPubKeyLen) {
        return ruleError(ErrBadCoinbaseTransaction, err.Error())
    }
    scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
    if err != nil {
        return err
    }
@@ -119,81 +147,132 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl

// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
|
||||
bluesFeeData, err := dag.getBluesFeeData(node)
|
||||
bluesFeeData, err := node.getBluesFeeData(dag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txIns := []*domainmessage.TxIn{}
|
||||
txOuts := []*domainmessage.TxOut{}
|
||||
txIns := []*wire.TxIn{}
|
||||
txOuts := []*wire.TxOut{}
|
||||
|
||||
for _, blue := range node.blues {
|
||||
txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
|
||||
txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txIns = append(txIns, txIn)
|
||||
if txOut != nil {
|
||||
txOuts = append(txOuts, txOut)
|
||||
}
|
||||
}
|
||||
payload, err := coinbasepayload.SerializeCoinbasePayload(node.blueScore, scriptPubKey, extraData)
|
||||
payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
coinbaseTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
|
||||
coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
|
||||
sortedCoinbaseTx := txsort.Sort(coinbaseTx)
|
||||
return util.NewTx(sortedCoinbaseTx), nil
|
||||
}
|
||||
|
||||
// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock
|
||||
// If blueBlock gets no fee - returns nil for txOut
|
||||
func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*domainmessage.TxOut, error) {
|
||||
// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
|
||||
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = w.Write(scriptPubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = w.Write(extraData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.Bytes(), nil
|
||||
}
|
||||
|
||||
// DeserializeCoinbasePayload deserialize the coinbase payload to its component (scriptPubKey and extra data).
|
||||
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
|
||||
r := bytes.NewReader(tx.Payload)
|
||||
scriptPubKeyLen, err := wire.ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
scriptPubKey = make([]byte, scriptPubKeyLen)
|
||||
_, err = r.Read(scriptPubKey)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
extraData = make([]byte, r.Len())
|
||||
if r.Len() != 0 {
|
||||
_, err = r.Read(extraData)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return scriptPubKey, extraData, nil
|
||||
}
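The payload layout above is simply <scriptPubKey length as varint><scriptPubKey><remaining bytes as extra data>. Below is a minimal round-trip sketch of the two v0.1.1-side helpers; the helper name and sample values are hypothetical, and it assumes the bytes, errors, and wire imports already present in this file, with wire.MsgTx exposing its Payload field as the deserializer suggests.

// exampleCoinbasePayloadRoundTrip is a hypothetical helper demonstrating that
// DeserializeCoinbasePayload inverts SerializeCoinbasePayload.
func exampleCoinbasePayloadRoundTrip() error {
	scriptPubKey := []byte{0x51}     // sample script bytes (hypothetical)
	extraData := []byte("miner tag") // sample free-form miner data (hypothetical)

	payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
	if err != nil {
		return err
	}

	// The deserializer reads the payload straight off a MsgTx.
	tx := &wire.MsgTx{Payload: payload}
	gotScriptPubKey, gotExtraData, err := DeserializeCoinbasePayload(tx)
	if err != nil {
		return err
	}
	if !bytes.Equal(gotScriptPubKey, scriptPubKey) || !bytes.Equal(gotExtraData, extraData) {
		return errors.New("coinbase payload did not round-trip")
	}
	return nil
}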

// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
	txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
	*wire.TxIn, *wire.TxOut, error) {

	blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
	if !ok {
		return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
		return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
	}
	blockFeeData, ok := feeData[*blueBlock.hash]
	if !ok {
		return nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
		return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
	}

	if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
		return nil, errors.Errorf(
		return nil, nil, errors.Errorf(
			"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
			len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
	}

	txIn := &wire.TxIn{
		SignatureScript: []byte{},
		PreviousOutpoint: wire.Outpoint{
			TxID: daghash.TxID(*blueBlock.hash),
			Index: math.MaxUint32,
		},
		Sequence: wire.MaxTxInSequenceNum,
	}

	totalFees := uint64(0)
	feeIterator := blockFeeData.iterator()

	for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
		fee, err := feeIterator.next()
		if err != nil {
			return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
			return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
		}
		if txAcceptanceData.IsAccepted {
			totalFees += fee
		}
	}

	totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.Params) + totalFees
	totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees

	if totalReward == 0 {
		return nil, nil
		return txIn, nil, nil
	}

	// the ScriptPubKey for the coinbase is parsed from the coinbase payload
	_, scriptPubKey, _, err := coinbasepayload.DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
	scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
	if err != nil {
		return nil, err
		return nil, nil, err
	}

	txOut := &domainmessage.TxOut{
	txOut := &wire.TxOut{
		Value: totalReward,
		ScriptPubKey: scriptPubKey,
	}

	return txOut, nil
	return txIn, txOut, nil
}

@@ -7,22 +7,33 @@ package blockdag
import (
	"compress/bzip2"
	"encoding/binary"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/util/mstime"

	"github.com/pkg/errors"
	"time"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/domainmessage"
	_ "github.com/kaspanet/kaspad/database/ffldb"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
	blocks, err := LoadBlocks(filename)
	if err == nil {
		t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
		for i, b := range blocks {
			t.Logf("Block #%d: %s", i, b.Hash())
		}
	}
	return blocks, err
}

// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
	// The utxostore file format is:
@@ -73,12 +84,19 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
			return nil, err
		}

		// Deserialize the UTXO entry and add it to the UTXO set.
		entry, err := deserializeUTXOEntry(r)
		// Serialized utxo entry.
		serialized := make([]byte, numBytes)
		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[domainmessage.Outpoint{TxID: txID, Index: index}] = entry

		// Deserialize it and add it to the view.
		entry, err := deserializeUTXOEntry(serialized)
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
	}

	return utxoSet, nil
@@ -87,7 +105,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
	dag.Params.BlockCoinbaseMaturity = maturity
	dag.dagParams.BlockCoinbaseMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
@@ -95,10 +113,12 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
	index := newBlockIndex(params)
	index := newBlockIndex(nil, params)
	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	dag := &BlockDAG{
		Params: params,
		timeSource: NewTimeSource(),
		dagParams: params,
		timeSource: NewMedianTime(),
		targetTimePerBlock: targetTimePerBlock,
		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
		TimestampDeviationTolerance: params.TimestampDeviationTolerance,
		powMaxBits: util.BigToCompact(params.PowMax),
@@ -109,18 +129,18 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {

	// Create a genesis block node and block index populated with it
	// on the above fake DAG.
	dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
	dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newSet())
	index.AddNode(dag.genesis)

	dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
	dag.virtual = newVirtualBlock(dag, setFromSlice(dag.genesis))
	return dag
}

// newTestNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields.
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp mstime.Time) *blockNode {
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {
	// Make up a header and create a block node from it.
	header := &domainmessage.BlockHeader{
	header := &wire.BlockHeader{
		Version: blockVersion,
		ParentHashes: parents.hashes(),
		Bits: bits,
@@ -143,55 +163,62 @@ func addNodeAsChildToParents(node *blockNode) {
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
	if wantErr == nil && gotErr == nil {
	// Ensure the error code is of the expected type and the error
	// code matches the value specified in the test instance.
	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
		return errors.Errorf("wrong error - got %T (%[1]v), want %T",
			gotErr, wantErr)
	}
	if gotErr == nil {
		return nil
	}

	var gotRuleErr RuleError
	if ok := errors.As(gotErr, &gotRuleErr); !ok {
		return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
	}

	var wantRuleErr RuleError
	if ok := errors.As(wantErr, &wantRuleErr); !ok {
		return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
	// Ensure the want error type is a script error.
	werr, ok := wantErr.(RuleError)
	if !ok {
		return errors.Errorf("unexpected test error type %T", wantErr)
	}

	// Ensure the error codes match. It's safe to use a raw type assert
	// here since the code above already proved they are the same type and
	// the want error is a script error.
	if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
	gotErrorCode := gotErr.(RuleError).ErrorCode
	if gotErrorCode != werr.ErrorCode {
		return errors.Errorf("mismatched error code - got %v (%v), want %v",
			gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
			gotErrorCode, gotErr, werr.ErrorCode)
	}

	return nil
}

func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*domainmessage.MsgBlock) *domainmessage.MsgBlock {
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
	parentHashes := make([]*daghash.Hash, len(parents))
	for i, parent := range parents {
		parentHashes[i] = parent.BlockHash()
	}
	return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
	daghash.Sort(parentHashes)
	block, err := PrepareBlockForTest(dag, parentHashes, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}
	utilBlock := util.NewBlock(block)
	isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
	if err != nil {
		t.Fatalf("unexpected error in ProcessBlock: %s", err)
	}
	if isDelayed {
		t.Fatalf("block is too far in the future")
	}
	if isOrphan {
		t.Fatalf("block was unexpectedly orphan")
	}
	return block
}

func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *domainmessage.MsgBlock) *blockNode {
	node, ok := dag.index.LookupNode(block.BlockHash())
	if !ok {
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
	node := dag.index.LookupNode(block.BlockHash())
	if node == nil {
		t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
	}
	return node
}

type fakeTimeSource struct {
	time mstime.Time
}

func (fts *fakeTimeSource) Now() mstime.Time {
	return fts.time
}

func newFakeTimeSource(fakeTime mstime.Time) TimeSource {
	return &fakeTimeSource{time: fakeTime}
}

584	blockdag/compress.go	Normal file
@@ -0,0 +1,584 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
//           0 -> [0x00]
//         127 -> [0x7f]                 * Max 1-byte value
//         128 -> [0x80 0x00]
//         129 -> [0x80 0x01]
//         255 -> [0x80 0x7f]
//         256 -> [0x81 0x00]
//       16511 -> [0xff 0x7f]            * Max 2-byte value
//       16512 -> [0x80 0x80 0x00]
//       32895 -> [0x80 0xff 0x7f]
//     2113663 -> [0xff 0xff 0x7f]       * Max 3-byte value
//   270549119 -> [0xff 0xff 0xff 0x7f]  * Max 4-byte value
//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
//   https://en.wikipedia.org/wiki/Variable-length_quantity
//   http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------

// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
	size := 1
	for ; n > 0x7f; n = (n >> 7) - 1 {
		size++
	}

	return size
}

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The high bit is set when another byte follows.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}

		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}

	// Reverse the bytes so it is MSB-encoded.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}

	return offset + 1
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}

	return n, size
}
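As a quick sanity sketch for the three VLQ helpers above, the hypothetical function below encodes a few of the documented example values and decodes them back; it uses only functions defined in this file.

// exampleVLQRoundTrip is a hypothetical check that putVLQ and deserializeVLQ
// are inverses for a few of the documented example values: every value
// encodes in exactly serializeSizeVLQ(v) bytes and decodes to itself.
func exampleVLQRoundTrip() bool {
	for _, v := range []uint64{0, 127, 128, 16511, 16512, 2113663} {
		buf := make([]byte, serializeSizeVLQ(v))
		putVLQ(buf, v)
		decoded, bytesRead := deserializeVLQ(buf)
		if decoded != v || bytesRead != len(buf) {
			return false
		}
	}
	return true
}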

// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script.
//
// The general serialized format is:
//
//   <script size or type><script data>
//
//   Field                 Type     Size
//   script size or type   VLQ      variable
//   script data           []byte   variable
//
// The specific serialized format for each recognized standard script is:
//
//  - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
//  - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
//  - Pay-to-pubkey**:    (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
//    2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
//    4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
//  ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------

// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
	// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
	cstPayToPubKeyHash = 0

	// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
	cstPayToScriptHash = 1

	// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp2 = 2

	// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp3 = 3

	// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp4 = 4

	// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp5 = 5

	// numSpecialScripts is the number of special scripts recognized by the
	// domain-specific script compression algorithm.
	numSpecialScripts = 6
)

// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
	if len(script) == 25 && script[0] == txscript.OpDup &&
		script[1] == txscript.OpHash160 &&
		script[2] == txscript.OpData20 &&
		script[23] == txscript.OpEqualVerify &&
		script[24] == txscript.OpCheckSig {

		return true, script[3:23]
	}

	return false, nil
}

// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
	if len(script) == 23 && script[0] == txscript.OpHash160 &&
		script[1] == txscript.OpData20 &&
		script[22] == txscript.OpEqual {

		return true, script[2:22]
	}

	return false, nil
}

// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
	// Pay-to-compressed-pubkey script.
	if len(script) == 35 && script[0] == txscript.OpData33 &&
		script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
		script[1] == 0x03) {

		// Ensure the public key is valid.
		serializedPubKey := script[1:34]
		_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	// Pay-to-uncompressed-pubkey script.
	if len(script) == 67 && script[0] == txscript.OpData65 &&
		script[66] == txscript.OpCheckSig && script[1] == 0x04 {

		// Ensure the public key is valid.
		serializedPubKey := script[1:66]
		_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	return false, nil
}

// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, _ := isPubKeyHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-script-hash script.
	if valid, _ := isScriptHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, _ := isPubKey(scriptPubKey); valid {
		return 33
	}

	// When none of the above special cases apply, encode the script as is
	// preceded by the sum of its size and the number of special cases
	// encoded as a variable length quantity.
	return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
		len(scriptPubKey)
}

// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
	scriptSize, bytesRead := deserializeVLQ(serialized)
	if bytesRead == 0 {
		return 0
	}

	switch scriptSize {
	case cstPayToPubKeyHash:
		return 21

	case cstPayToScriptHash:
		return 21

	case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
		cstPayToPubKeyUncomp5:
		return 33
	}

	scriptSize -= numSpecialScripts
	scriptSize += uint64(bytesRead)
	return int(scriptSize)
}

// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, hash := isPubKeyHash(scriptPubKey); valid {
		target[0] = cstPayToPubKeyHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-script-hash script.
	if valid, hash := isScriptHash(scriptPubKey); valid {
		target[0] = cstPayToScriptHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
		pubKeyFormat := serializedPubKey[0]
		switch pubKeyFormat {
		case 0x02, 0x03:
			target[0] = pubKeyFormat
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		case 0x04:
			// Encode the oddness of the serialized pubkey into the
			// compressed script type.
			target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		}
	}

	// When none of the above special cases apply, encode the unmodified
	// script preceded by the sum of its size and the number of special
	// cases encoded as a variable length quantity.
	encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
	vlqSizeLen := putVLQ(target, encodedSize)
	copy(target[vlqSizeLen:], scriptPubKey)
	return vlqSizeLen + len(scriptPubKey)
}

// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
	// In practice this function will not be called with a zero-length or
	// nil script since the nil script encoding includes the length, however
	// the code below assumes the length exists, so just return nil now if
	// the function ever ends up being called with a nil script in the
	// future.
	if len(compressedScriptPubKey) == 0 {
		return nil
	}

	// Decode the script size and examine it for the special cases.
	encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
	switch encodedScriptSize {
	// Pay-to-pubkey-hash script. The resulting script is:
	// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
	case cstPayToPubKeyHash:
		scriptPubKey := make([]byte, 25)
		scriptPubKey[0] = txscript.OpDup
		scriptPubKey[1] = txscript.OpHash160
		scriptPubKey[2] = txscript.OpData20
		copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[23] = txscript.OpEqualVerify
		scriptPubKey[24] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-script-hash script. The resulting script is:
	// <OP_HASH160><20 byte script hash><OP_EQUAL>
	case cstPayToScriptHash:
		scriptPubKey := make([]byte, 23)
		scriptPubKey[0] = txscript.OpHash160
		scriptPubKey[1] = txscript.OpData20
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[22] = txscript.OpEqual
		return scriptPubKey

	// Pay-to-compressed-pubkey script. The resulting script is:
	// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
		scriptPubKey := make([]byte, 35)
		scriptPubKey[0] = txscript.OpData33
		scriptPubKey[1] = byte(encodedScriptSize)
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
		scriptPubKey[34] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-uncompressed-pubkey script. The resulting script is:
	// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
		// Change the leading byte to the appropriate compressed pubkey
		// identifier (0x02 or 0x03) so it can be decoded as a
		// compressed pubkey. This really should never fail since the
		// encoding ensures it is valid before compressing to this type.
		compressedKey := make([]byte, 33)
		compressedKey[0] = byte(encodedScriptSize - 2)
		copy(compressedKey[1:], compressedScriptPubKey[1:])
		key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
		if err != nil {
			return nil
		}

		scriptPubKey := make([]byte, 67)
		scriptPubKey[0] = txscript.OpData65
		copy(scriptPubKey[1:], key.SerializeUncompressed())
		scriptPubKey[66] = txscript.OpCheckSig
		return scriptPubKey
	}

	// When none of the special cases apply, the script was encoded using
	// the general format, so reduce the script size by the number of
	// special cases and return the unmodified script.
	scriptSize := int(encodedScriptSize - numSpecialScripts)
	scriptPubKey := make([]byte, scriptSize)
	copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
	return scriptPubKey
}
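A hypothetical round-trip sketch for the script compressor above, assuming a bytes import is added to this file: a standard pay-to-pubkey-hash script always compresses to 21 bytes (the type byte 0x00 plus the 20-byte hash) and decompresses back to the original 25-byte script.

// exampleScriptRoundTrip is a hypothetical helper showing that decompressScript
// inverts putCompressedScript for a standard P2PKH script.
func exampleScriptRoundTrip(p2pkhScript []byte) bool {
	compressed := make([]byte, compressedScriptSize(p2pkhScript)) // 21 for P2PKH
	putCompressedScript(compressed, p2pkhScript)
	return bytes.Equal(decompressScript(compressed), p2pkhScript)
}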

// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
//   value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
//   dividing the value by 10 (call the result n). The encoded value is thus:
//   1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
//   is thus:
//   1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parentheses are the number of bytes when serialized as a VLQ)
//            0 (1) -> 0         (1) *  0.00000000 KAS
//         1000 (2) -> 4         (1) *  0.00001000 KAS
//        10000 (2) -> 5         (1) *  0.00010000 KAS
//     12345678 (4) -> 111111101 (4) *  0.12345678 KAS
//     50000000 (4) -> 48        (1) *  0.50000000 KAS
//    100000000 (4) -> 9         (1) *  1.00000000 KAS
//    500000000 (5) -> 49        (1) *  5.00000000 KAS
//   1000000000 (5) -> 10        (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------

// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// Find the largest power of 10 (max of 9) that evenly divides the
	// value.
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}

	// The compressed result for exponents less than 9 is:
	// 1 + 10*(9*n + d-1) + e
	if exponent < 9 {
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}

	// The compressed result for an exponent of 9 is:
	// 1 + 10*(n-1) + e == 10 + 10*(n-1)
	return 10 + 10*(amount-1)
}

// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// The decompressed amount is either of the following two equations:
	// x = 1 + 10*(9*n + d - 1) + e
	// x = 1 + 10*(n - 1) + 9
	amount--

	// The decompressed amount is now one of the following two equations:
	// x = 10*(9*n + d - 1) + e
	// x = 10*(n - 1) + 9
	exponent := amount % 10
	amount /= 10

	// The decompressed amount is now one of the following two equations:
	// x = 9*n + d - 1  | where e < 9
	// x = n - 1        | where e = 9
	n := uint64(0)
	if exponent < 9 {
		lastDigit := amount%9 + 1
		amount /= 9
		n = amount*10 + lastDigit
	} else {
		n = amount + 1
	}

	// Apply the exponent.
	for ; exponent > 0; exponent-- {
		n *= 10
	}

	return n
}
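A worked instance of the formula above, as a hypothetical sketch: 0.1 KAS is 10000000 sompi, which strips to e=7 with 1 remaining, so d=1 and n=0, giving 1 + 10*(9*0 + 1-1) + 7 = 8. Decompression reverses each step.

// exampleAmountCompression is a hypothetical demo of the arithmetic above:
// 10000000 sompi compresses to 8 (one VLQ byte instead of four) and
// decompresses back to 10000000.
func exampleAmountCompression() (uint64, uint64) {
	compressed := compressTxOutAmount(10000000)   // 8
	restored := decompressTxOutAmount(compressed) // 10000000 again
	return compressed, restored
}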

// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
//   <compressed amount><compressed script>
//
//   Field               Type     Size
//   compressed amount   VLQ      variable
//   compressed script   []byte   variable
// -----------------------------------------------------------------------------

// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
	return serializeSizeVLQ(compressTxOutAmount(amount)) +
		compressedScriptSize(scriptPubKey)
}

// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
	offset := putVLQ(target, compressTxOutAmount(amount))
	offset += putCompressedScript(target[offset:], scriptPubKey)
	return offset
}

// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
	// Deserialize the compressed amount and ensure there are bytes
	// remaining for the compressed script.
	compressedAmount, bytesRead := deserializeVLQ(serialized)
	if bytesRead >= len(serialized) {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after compressed amount")
	}

	// Decode the compressed script size and ensure there are enough bytes
	// left in the slice for it.
	scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
	if len(serialized[bytesRead:]) < scriptSize {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after script size")
	}

	// Decompress and return the amount and script.
	amount := decompressTxOutAmount(compressedAmount)
	script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
	return amount, script, bytesRead + scriptSize, nil
}
||||
436
blockdag/compress_test.go
Normal file
436
blockdag/compress_test.go
Normal file
@@ -0,0 +1,436 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
// is an error. This is only provided for the hard-coded constants so errors in
|
||||
// the source code can be detected. It will only (and must only) be called with
|
||||
// hard-coded values.
|
||||
func hexToBytes(s string) []byte {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic("invalid hex in source file: " + s)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// TestVLQ ensures the variable length quantity serialization, deserialization,
|
||||
// and size calculation works as expected.
|
||||
func TestVLQ(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
val uint64
|
||||
serialized []byte
|
||||
}{
|
||||
{0, hexToBytes("00")},
|
||||
{1, hexToBytes("01")},
|
||||
{127, hexToBytes("7f")},
|
||||
{128, hexToBytes("8000")},
|
||||
{129, hexToBytes("8001")},
|
||||
{255, hexToBytes("807f")},
|
||||
{256, hexToBytes("8100")},
|
||||
{16383, hexToBytes("fe7f")},
|
||||
{16384, hexToBytes("ff00")},
|
||||
{16511, hexToBytes("ff7f")}, // Max 2-byte value
|
||||
{16512, hexToBytes("808000")},
|
||||
{16513, hexToBytes("808001")},
|
||||
{16639, hexToBytes("80807f")},
|
||||
{32895, hexToBytes("80ff7f")},
|
||||
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
|
||||
{2113664, hexToBytes("80808000")},
|
||||
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
|
||||
{270549120, hexToBytes("8080808000")},
|
||||
{2147483647, hexToBytes("86fefefe7f")},
|
||||
{2147483648, hexToBytes("86fefeff00")},
|
||||
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
|
||||
// Max uint64, 10 bytes
|
||||
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the value is calculated properly.
|
||||
gotSize := serializeSizeVLQ(test.val)
|
||||
if gotSize != len(test.serialized) {
|
||||
t.Errorf("serializeSizeVLQ: did not get expected size "+
|
||||
"for %d - got %d, want %d", test.val, gotSize,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the value serializes to the expected bytes.
|
||||
gotBytes := make([]byte, gotSize)
|
||||
gotBytesWritten := putVLQ(gotBytes, test.val)
|
||||
if !bytes.Equal(gotBytes, test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected bytes "+
|
||||
"for %d - got %x, want %x", test.val, gotBytes,
|
||||
test.serialized)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected number "+
|
||||
"of bytes written for %d - got %d, want %d",
|
||||
test.val, gotBytesWritten, len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the serialized bytes deserialize to the expected
|
||||
// value.
|
||||
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
|
||||
if gotVal != test.val {
|
||||
t.Errorf("deserializeVLQ: did not get expected value "+
|
||||
"for %x - got %d, want %d", test.serialized,
|
||||
gotVal, test.val)
|
||||
continue
|
||||
}
|
||||
if gotBytesRead != len(test.serialized) {
|
||||
t.Errorf("deserializeVLQ: did not get expected number "+
|
||||
"of bytes read for %d - got %d, want %d",
|
||||
test.serialized, gotBytesRead,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScriptCompression ensures the domain-specific script compression and
|
||||
// decompression works as expected.
|
||||
func TestScriptCompression(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
uncompressed []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
uncompressed: nil,
|
||||
compressed: hexToBytes("06"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 1",
|
||||
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 2",
|
||||
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
|
||||
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 1",
|
||||
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
|
||||
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 2",
|
||||
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
|
||||
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x02",
|
||||
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
|
||||
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x03",
|
||||
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
|
||||
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 even",
|
||||
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 odd",
|
||||
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
|
||||
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey invalid pubkey",
|
||||
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
},
|
||||
{
|
||||
name: "requires 2 size bytes - data push 200 bytes",
|
||||
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
// [0x80, 0x50] = 208 as a variable length quantity
|
||||
// [0x4c, 0xc8] = OP_PUSHDATA1 200
|
||||
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the value is calculated properly.
|
||||
gotSize := compressedScriptSize(test.uncompressed)
|
||||
if gotSize != len(test.compressed) {
|
||||
t.Errorf("compressedScriptSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
gotSize, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the script compresses to the expected bytes.
|
||||
gotCompressed := make([]byte, gotSize)
|
||||
gotBytesWritten := putCompressedScript(gotCompressed,
|
||||
test.uncompressed)
|
||||
if !bytes.Equal(gotCompressed, test.compressed) {
|
||||
t.Errorf("putCompressedScript (%s): did not get "+
|
||||
"expected bytes - got %x, want %x", test.name,
|
||||
gotCompressed, test.compressed)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.compressed) {
|
||||
t.Errorf("putCompressedScript (%s): did not get "+
|
||||
"expected number of bytes written - got %d, "+
|
||||
"want %d", test.name, gotBytesWritten,
|
||||
len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the compressed script size is properly decoded from
|
||||
// the compressed script.
|
||||
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
|
||||
if gotDecodedSize != len(test.compressed) {
|
||||
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
gotDecodedSize, len(test.compressed))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the script decompresses to the expected bytes.
|
||||
gotDecompressed := decompressScript(test.compressed)
|
||||
if !bytes.Equal(gotDecompressed, test.uncompressed) {
|
||||
t.Errorf("decompressScript (%s): did not get expected "+
|
||||
"bytes - got %x, want %x", test.name,
|
||||
gotDecompressed, test.uncompressed)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScriptCompressionErrors ensures calling various functions related to
|
||||
// script compression with incorrect data returns the expected results.
|
||||
func TestScriptCompressionErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// A nil script must result in a decoded size of 0.
|
||||
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
|
||||
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
|
||||
"return 0 - got %d", gotSize)
|
||||
}
|
||||
|
||||
// A nil script must result in a nil decompressed script.
|
||||
if gotScript := decompressScript(nil); gotScript != nil {
|
||||
t.Fatalf("decompressScript with nil script did not return nil "+
|
||||
"decompressed script - got %x", gotScript)
|
||||
}
|
||||
|
||||
// A compressed script for a pay-to-pubkey (uncompressed) that results
|
||||
// in an invalid pubkey must result in a nil decompressed script.
|
||||
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
|
||||
"7903c3ebec3a957724895dca52c6b4")
|
||||
if gotScript := decompressScript(compressedScript); gotScript != nil {
|
||||
t.Fatalf("decompressScript with compressed pay-to-"+
|
||||
"uncompressed-pubkey that is invalid did not return "+
|
||||
"nil decompressed script - got %x", gotScript)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAmountCompression ensures the domain-specific transaction output amount
|
||||
// compression and decompression works as expected.
|
||||
func TestAmountCompression(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
uncompressed uint64
|
||||
compressed uint64
|
||||
}{
|
||||
{
|
||||
name: "0 KAS",
|
||||
uncompressed: 0,
|
||||
compressed: 0,
|
||||
},
|
||||
{
|
||||
name: "546 Sompi (current network dust value)",
|
||||
uncompressed: 546,
|
||||
compressed: 4911,
|
||||
},
|
||||
{
|
||||
name: "0.00001 KAS (typical transaction fee)",
|
||||
uncompressed: 1000,
|
||||
compressed: 4,
|
||||
},
|
||||
{
|
||||
name: "0.0001 KAS (typical transaction fee)",
|
||||
uncompressed: 10000,
|
||||
compressed: 5,
|
||||
},
|
||||
{
|
||||
name: "0.12345678 KAS",
|
||||
uncompressed: 12345678,
|
||||
compressed: 111111101,
|
||||
},
|
||||
{
|
||||
name: "0.5 KAS",
|
||||
uncompressed: 50000000,
|
||||
compressed: 48,
|
||||
},
|
||||
{
|
||||
name: "1 KAS",
|
||||
uncompressed: 100000000,
|
||||
compressed: 9,
|
||||
},
|
||||
{
|
||||
name: "5 KAS",
|
||||
uncompressed: 500000000,
|
||||
compressed: 49,
|
||||
},
|
||||
{
|
||||
name: "21000000 KAS (max minted coins)",
|
||||
uncompressed: 2100000000000000,
|
||||
compressed: 21000000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the amount compresses to the expected value.
|
||||
gotCompressed := compressTxOutAmount(test.uncompressed)
|
||||
if gotCompressed != test.compressed {
|
||||
t.Errorf("compressTxOutAmount (%s): did not get "+
|
||||
"expected value - got %d, want %d", test.name,
|
||||
gotCompressed, test.compressed)
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the value decompresses to the expected value.
|
||||
gotDecompressed := decompressTxOutAmount(test.compressed)
|
||||
if gotDecompressed != test.uncompressed {
|
||||
t.Errorf("decompressTxOutAmount (%s): did not get "+
|
||||
"expected value - got %d, want %d", test.name,
|
||||
gotDecompressed, test.uncompressed)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompressedTxOut ensures the transaction output serialization and
|
||||
// deserialization works as expected.
|
||||
func TestCompressedTxOut(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
amount uint64
|
||||
scriptPubKey []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "pay-to-pubkey-hash dust",
|
||||
amount: 546,
|
||||
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 1 KAS",
|
||||
amount: 100000000,
|
||||
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the txout is calculated properly.
		gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedTxOutSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotSize, len(test.compressed))
			continue
		}

		// Ensure the txout compresses to the expected value.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedTxOut(gotCompressed,
			test.amount, test.scriptPubKey)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("compressTxOut (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
				gotCompressed, test.compressed)
			continue
		}
		if gotBytesWritten != len(test.compressed) {
			t.Errorf("compressTxOut (%s): did not get expected "+
				"number of bytes written - got %d, want %d",
				test.name, gotBytesWritten,
				len(test.compressed))
			continue
		}

		// Ensure the serialized bytes are decoded back to the expected
		// uncompressed values.
		gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
			test.compressed)
		if err != nil {
			t.Errorf("decodeCompressedTxOut (%s): unexpected "+
				"error: %v", test.name, err)
			continue
		}
		if gotAmount != test.amount {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected amount - got %d, want %d",
				test.name, gotAmount, test.amount)
			continue
		}
		if !bytes.Equal(gotScript, test.scriptPubKey) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected script - got %x, want %x",
				test.name, gotScript, test.scriptPubKey)
			continue
		}
		if gotBytesRead != len(test.compressed) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected number of bytes read - got %d, want %d",
				test.name, gotBytesRead, len(test.compressed))
			continue
		}
	}
}

// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
	t.Parallel()

	// A compressed txout with missing compressed script must error.
	compressedTxOut := hexToBytes("00")
	_, _, _, err := decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with missing compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}

	// A compressed txout with short compressed script must error.
	compressedTxOut = hexToBytes("0010")
	_, _, _, err = decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with short compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}
}
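The hex vectors in TestCompressedTxOut above begin with a variable-length quantity (VLQ) holding the compressed amount: "a52f" decodes to 4911 (the compressed dust amount) and "09" to 9 (the compressed 1 KAS). A sketch of the MSB-base-128 VLQ writer these encodings are consistent with (assumed to mirror btcd's putVLQ; kaspad's own implementation is not shown in this diff):

func putVLQSketch(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// All bytes but the lowest-order one get the high bit set.
		highBitMask := byte(0x00)
		if offset > 0 {
			highBitMask = 0x80
		}
		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		// Subtracting one makes the encoding a true prefix code, so
		// every value has exactly one representation.
		n = (n >> 7) - 1
	}
	// The bytes were produced low-order first; reverse them into place.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}
	return offset + 1
}

Encoding 4911 walks through 0x2f then (4911>>7)-1 = 37 -> 0xa5, reversed to a5 2f, which matches the leading bytes of the compressed dust output above.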
1077
blockdag/dag.go
File diff suppressed because it is too large
Load Diff
@@ -6,51 +6,39 @@ package blockdag

import (
	"bytes"
	"encoding/hex"
	"github.com/pkg/errors"
	"reflect"
	"testing"

	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
)

// TestErrNotInDAG ensures the functions related to ErrNotInDAG work
// TestErrNotInDAG ensures the functions related to errNotInDAG work
// as expected.
func TestErrNotInDAG(t *testing.T) {
	errStr := "no block at height 1 exists"
	err := error(ErrNotInDAG(errStr))
	err := error(errNotInDAG(errStr))

	// Ensure the stringized output for the error is as expected.
	if err.Error() != errStr {
t.Fatalf("ErrNotInDAG retuned unexpected error string - "+
|
||||
t.Fatalf("errNotInDAG retuned unexpected error string - "+
			"got %q, want %q", err.Error(), errStr)
	}

	// Ensure error is detected as the correct type.
	if !IsNotInDAGErr(err) {
		t.Fatalf("IsNotInDAGErr did not detect as expected type")
	if !isNotInDAGErr(err) {
		t.Fatalf("isNotInDAGErr did not detect as expected type")
	}
	err = errors.New("something else")
	if IsNotInDAGErr(err) {
		t.Fatalf("IsNotInDAGErr detected incorrect type")
	if isNotInDAGErr(err) {
		t.Fatalf("isNotInDAGErr detected incorrect type")
	}
}

// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
	b, err := hex.DecodeString(s)
	if err != nil {
		panic("invalid hex in source file: " + s)
	}
	return b
}

// TestUTXOSerialization ensures serializing and deserializing unspent
// TestUtxoSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
func TestUTXOSerialization(t *testing.T) {
func TestUtxoSerialization(t *testing.T) {
	t.Parallel()

	tests := []struct {
@@ -66,7 +54,7 @@ func TestUTXOSerialization(t *testing.T) {
				blockBlueScore: 1,
				packedFlags:    tfCoinbase,
			},
			serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
			serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
		},
		{
			name: "blue score 100001, not coinbase",
@@ -76,21 +64,13 @@ func TestUTXOSerialization(t *testing.T) {
				blockBlueScore: 100001,
				packedFlags:    0,
			},
			serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
			serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
		},
	}
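Reading the new-format vectors above: the entry appears to start with a VLQ header code packing the blue score together with the coinbase flag, followed by the VLQ compressed amount and the compressed script. This layout is inferred from the test data, not quoted from kaspad's source. For "03 32 04 96b5...", header 0x03 gives blue score 1 with the coinbase bit set, amount 0x32 (50) decompresses to 5,000,000,000 sompi, and script type 0x04 is a compressed pay-to-pubkey. For "8b9942 07 00 ee8b...", the header VLQ decodes to 200002, i.e. blue score 100001 and not coinbase; amount 7 decompresses to 1,000,000 sompi; type 0x00 is pay-to-pubkey-hash followed by the 20-byte hash. A tiny sketch of the assumed header-code split:

func decodeHeaderCodeSketch(headerCode uint64) (blueScore uint64, isCoinbase bool) {
	// Lowest bit: coinbase flag; remaining bits: the block blue score.
	isCoinbase = headerCode&0x01 != 0
	blueScore = headerCode >> 1
	return blueScore, isCoinbase
}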

	for i, test := range tests {
		// Ensure the utxo entry serializes to the expected value.
		w := &bytes.Buffer{}
		err := serializeUTXOEntry(w, test.entry)
		if err != nil {
			t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
				"error: %v", i, test.name, err)
			continue
		}

		gotBytes := w.Bytes()
		gotBytes := serializeUTXOEntry(test.entry)
		if !bytes.Equal(gotBytes, test.serialized) {
			t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
				"bytes - got %x, want %x", i, test.name,
@@ -98,8 +78,8 @@ func TestUTXOSerialization(t *testing.T) {
			continue
		}

		// Deserialize to a utxo entry.gotBytes
		utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
		// Deserialize to a utxo entry.
		utxoEntry, err := deserializeUTXOEntry(test.serialized)
		if err != nil {
			t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
				"error: %v", i, test.name, err)
@@ -144,24 +124,28 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
	tests := []struct {
		name       string
		serialized []byte
		errType    error
	}{
		{
			name:       "no data after header code",
			serialized: hexToBytes("02"),
			errType:    errDeserialize(""),
		},
		{
			name:       "incomplete compressed txout",
			serialized: hexToBytes("0232"),
			errType:    errDeserialize(""),
		},
	}

	for _, test := range tests {
		// Ensure the expected error type is returned and the returned
		// entry is nil.
		entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
		if err == nil {
			t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
				test.name)
		entry, err := deserializeUTXOEntry(test.serialized)
		if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
			t.Errorf("deserializeUTXOEntry (%s): expected error "+
				"type does not match - got %T, want %T",
				test.name, err, test.errType)
			continue
		}
		if entry != nil {
@@ -188,7 +172,7 @@ func TestDAGStateSerialization(t *testing.T) {
				TipHashes:         []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
				LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
			},
			serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
			serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
		},
		{
			name: "block 1",
@@ -196,7 +180,7 @@ func TestDAGStateSerialization(t *testing.T) {
				TipHashes:         []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
				LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
			},
			serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
			serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
		},
	}

@@ -233,6 +217,50 @@ func TestDAGStateSerialization(t *testing.T) {
	}
}

// TestDAGStateDeserializeErrors performs negative tests against
// deserializing the DAG state to ensure error paths work as expected.
func TestDAGStateDeserializeErrors(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		serialized []byte
		errType    error
	}{
		{
			name:       "nothing serialized",
			serialized: hexToBytes(""),
			errType:    database.Error{ErrorCode: database.ErrCorruption},
		},
		{
			name:       "corrupted data",
			serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
			errType:    database.Error{ErrorCode: database.ErrCorruption},
		},
	}

	for _, test := range tests {
		// Ensure the expected error type and code is returned.
		_, err := deserializeDAGState(test.serialized)
		if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
			t.Errorf("deserializeDAGState (%s): expected "+
				"error type does not match - got %T, want %T",
				test.name, err, test.errType)
			continue
		}
		if derr, ok := err.(database.Error); ok {
			tderr := test.errType.(database.Error)
			if derr.ErrorCode != tderr.ErrorCode {
				t.Errorf("deserializeDAGState (%s): "+
					"wrong error code got: %v, want: %v",
					test.name, derr.ErrorCode,
					tderr.ErrorCode)
				continue
			}
		}
	}
}

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be

@@ -5,17 +5,17 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/util/bigintpool"
	"github.com/kaspanet/kaspad/util/mstime"
	"math/big"
	"time"

	"github.com/kaspanet/kaspad/util"
)

// requiredDifficulty calculates the required difficulty for a
// block given its bluest parent.
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime mstime.Time) uint32 {
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
	// Genesis block.
	if dag.Params.DisableDifficultyAdjustment || bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
	if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
		return dag.powMaxBits
	}

@@ -30,21 +30,12 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ms
	// averageWindowTarget * ((windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
	// The result uses integer division which means it will be slightly
	// rounded down.
	newTarget := bigintpool.Acquire(0)
	defer bigintpool.Release(newTarget)
	windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
	defer bigintpool.Release(windowTimeStampDifference)
	targetTimePerBlock := bigintpool.Acquire(dag.Params.TargetTimePerBlock.Milliseconds())
	defer bigintpool.Release(targetTimePerBlock)
	difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
	defer bigintpool.Release(difficultyAdjustmentWindowSize)

	targetsWindow.averageTarget(newTarget)
	newTarget := targetsWindow.averageTarget()
	newTarget.
		Mul(newTarget, windowTimeStampDifference).
		Div(newTarget, targetTimePerBlock).
		Div(newTarget, difficultyAdjustmentWindowSize)
	if newTarget.Cmp(dag.Params.PowMax) > 0 {
		Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
		Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
		Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
	if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
		return dag.powMaxBits
	}
	newTargetBits := util.BigToCompact(newTarget)
@@ -55,7 +46,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ms
// be built on top of the current tips.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) NextRequiredDifficulty(timestamp mstime.Time) uint32 {
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
	difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
	return difficulty
}
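Both sides of this hunk compute the same retarget: the average target over the adjustment window, scaled by how long the window actually took versus how long it should have taken. A standalone sketch of that arithmetic, assuming millisecond timestamps as on the newer side (illustrative; the real code pools its big.Ints as shown above):

func retargetSketch(averageWindowTarget *big.Int, windowMinTimestamp,
	windowMaxTimestamp, targetTimePerBlockMillis int64, windowSize uint64,
	powMax *big.Int, powMaxBits uint32) uint32 {

	// newTarget = averageWindowTarget *
	//     (windowMaxTimestamp - windowMinTimestamp) /
	//     (targetTimePerBlock * windowSize)
	// Integer division rounds slightly down, as the comment in
	// requiredDifficulty notes.
	newTarget := new(big.Int).Set(averageWindowTarget)
	newTarget.Mul(newTarget, big.NewInt(windowMaxTimestamp-windowMinTimestamp))
	newTarget.Div(newTarget, big.NewInt(targetTimePerBlockMillis))
	newTarget.Div(newTarget, new(big.Int).SetUint64(windowSize))

	// Clamp to the network's minimum difficulty.
	if newTarget.Cmp(powMax) > 0 {
		return powMaxBits
	}
	return util.BigToCompact(newTarget)
}

If the window took longer than targetTimePerBlock*windowSize, the target grows (difficulty drops); if it was faster, the target shrinks.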

@@ -6,7 +6,6 @@ package blockdag

import (
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/mstime"
	"math/big"
	"testing"
	"time"
@@ -80,10 +79,10 @@ func TestCalcWork(t *testing.T) {
}

func TestDifficulty(t *testing.T) {
	params := dagconfig.MainnetParams
	params := dagconfig.SimnetParams
	params.K = 1
	params.DifficultyAdjustmentWindowSize = 264
	dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
	dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
		DAGParams: &params,
	})
	if err != nil {
@@ -91,12 +90,11 @@ func TestDifficulty(t *testing.T) {
	}
	defer teardownFunc()

	zeroTime := mstime.Time{}
	addNode := func(parents blockSet, blockTime mstime.Time) *blockNode {
	zeroTime := time.Unix(0, 0)
	addNode := func(parents blockSet, blockTime time.Time) *blockNode {
		bluestParent := parents.bluest()
		if blockTime.IsZero() {
			blockTime = bluestParent.time()
			blockTime = blockTime.Add(params.TargetTimePerBlock)
		if blockTime == zeroTime {
			blockTime = time.Unix(bluestParent.timestamp+1, 0)
		}
		block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
		if err != nil {
@@ -115,40 +113,34 @@ func TestDifficulty(t *testing.T) {
		if isOrphan {
			t.Fatalf("block was unexpectedly orphan")
		}
		node, ok := dag.index.LookupNode(block.BlockHash())
		if !ok {
			t.Fatalf("block %s does not exist in the DAG", block.BlockHash())
		}
		return node
		return dag.index.LookupNode(block.BlockHash())
	}
	tip := dag.genesis
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
		tip = addNode(blockSetFromSlice(tip), zeroTime)
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
|
||||
"window size, the difficulty should be the same as genesis'")
|
||||
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
		}
	}
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
		tip = addNode(blockSetFromSlice(tip), zeroTime)
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits != dag.genesis.bits {
			t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
		}
	}
	nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
	nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
	if nodeInThePast.bits != tip.bits {
		t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
	}
	tip = nodeInThePast

	tip = addNode(blockSetFromSlice(tip), zeroTime)
	tip = addNode(setFromSlice(tip), zeroTime)
	if tip.bits != nodeInThePast.bits {
		t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
	}
	tip = addNode(blockSetFromSlice(tip), zeroTime)
	tip = addNode(setFromSlice(tip), zeroTime)
	if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
		t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
			"block rate, so the difficulty should increase as well")
		t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
	}
	expectedBits := uint32(0x207f83df)
	if tip.bits != expectedBits {
@@ -157,7 +149,7 @@ func TestDifficulty(t *testing.T) {

	// Increase block rate to increase difficulty
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
		tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
		tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
		if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
			t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
		}
@@ -167,7 +159,7 @@ func TestDifficulty(t *testing.T) {
	lastBits := tip.bits
	sameBitsCount := uint64(0)
	for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
		tip = addNode(blockSetFromSlice(tip), zeroTime)
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits == lastBits {
			sameBitsCount++
		} else {
@@ -175,41 +167,37 @@ func TestDifficulty(t *testing.T) {
			sameBitsCount = 0
		}
	}
	slowBlockTime := tip.time()
	slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
	slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
	slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
	if slowNode.bits != tip.bits {
		t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
	}

	tip = slowNode

	tip = addNode(blockSetFromSlice(tip), zeroTime)
	tip = addNode(setFromSlice(tip), zeroTime)
	if tip.bits != slowNode.bits {
		t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
	}
	tip = addNode(blockSetFromSlice(tip), zeroTime)
	tip = addNode(setFromSlice(tip), zeroTime)
	if compareBits(tip.bits, slowNode.bits) <= 0 {
		t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
			" rate, so the difficulty should decrease as well")
		t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
	}

	splitNode := addNode(blockSetFromSlice(tip), zeroTime)
	splitNode := addNode(setFromSlice(tip), zeroTime)
	tip = splitNode
	for i := 0; i < 100; i++ {
		tip = addNode(blockSetFromSlice(tip), zeroTime)
		tip = addNode(setFromSlice(tip), zeroTime)
	}
	blueTip := tip

	redChainTip := splitNode
	for i := 0; i < 10; i++ {
		redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
		redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
	}
	tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
	tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
	tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
	tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
	if tipWithoutRedPast.bits != tipWithRedPast.bits {
		t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
			" shouldn't affect the difficulty")
		t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
	}
}

@@ -6,10 +6,28 @@ package blockdag

import (
	"fmt"

	"github.com/pkg/errors"
)

// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
	return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}

// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
	return "assertion failed: " + string(e)
}

// ErrorCode identifies a kind of error.
type ErrorCode int

@@ -28,6 +46,11 @@ const (
	// to a newer version.
	ErrBlockVersionTooOld

	// ErrInvalidTime indicates the time in the passed block has a precision
	// that is more than one second. The DAG consensus rules require
	// timestamps to have a maximum precision of one second.
	ErrInvalidTime

	// ErrTimeTooOld indicates the time is before the median time of
	// the last several blocks per the DAG consensus rules.
	ErrTimeTooOld
@@ -64,9 +87,6 @@ const (
	// the expected value.
	ErrBadUTXOCommitment

	// ErrInvalidSubnetwork indicates the subnetwork is not allowed.
	ErrInvalidSubnetwork

	// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
	// last finality point.
	ErrFinalityPointTimeTooOld
@@ -101,11 +121,6 @@ const (
	// either does not exist or has already been spent.
	ErrMissingTxOut

	// ErrDoubleSpendInSameBlock indicates a transaction
	// that spends an output that was already spent by another
	// transaction in the same block.
	ErrDoubleSpendInSameBlock

	// ErrUnfinalizedTx indicates a transaction has not been finalized.
	// A valid block may only contain finalized transactions.
	ErrUnfinalizedTx
@@ -202,14 +217,6 @@ const (
	// ErrInvalidParentsRelation indicates that one of the parents of a block
	// is also an ancestor of another parent
	ErrInvalidParentsRelation

	// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
	// submitted with BFDisallowDelay flag raised.
	ErrDelayedBlockIsNotAllowed

	// ErrOrphanBlockIsNotAllowed indicates that an orphan block was submitted with
	// BFDisallowOrphans flag raised.
	ErrOrphanBlockIsNotAllowed
)

// Map of ErrorCode values back to their constant names for pretty printing.
@@ -217,6 +224,7 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrDuplicateBlock:     "ErrDuplicateBlock",
	ErrBlockMassTooHigh:   "ErrBlockMassTooHigh",
	ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
	ErrInvalidTime:        "ErrInvalidTime",
	ErrTimeTooOld:         "ErrTimeTooOld",
	ErrTimeTooNew:         "ErrTimeTooNew",
	ErrNoParents:          "ErrNoParents",
@@ -233,7 +241,6 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrDuplicateTxInputs:      "ErrDuplicateTxInputs",
	ErrBadTxInput:             "ErrBadTxInput",
	ErrMissingTxOut:           "ErrMissingTxOut",
	ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
	ErrUnfinalizedTx:          "ErrUnfinalizedTx",
	ErrDuplicateTx:            "ErrDuplicateTx",
	ErrOverwriteTx:            "ErrOverwriteTx",
@@ -257,8 +264,6 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrInvalidPayload:           "ErrInvalidPayload",
	ErrInvalidPayloadHash:       "ErrInvalidPayloadHash",
	ErrInvalidParentsRelation:   "ErrInvalidParentsRelation",
	ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
	ErrOrphanBlockIsNotAllowed:  "ErrOrphanBlockIsNotAllowed",
}

// String returns the ErrorCode as a human-readable name.
@@ -284,10 +289,7 @@ func (e RuleError) Error() string {
	return e.Description
}

func ruleError(c ErrorCode, desc string) error {
	return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
// ruleError creates a RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
	return RuleError{ErrorCode: c, Description: desc}
}

// ErrInvalidParameter signifies that an invalid parameter has been
// supplied to one of the BlockDAG functions.
var ErrInvalidParameter = errors.New("invalid parameter")
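Note why the tests elsewhere in this diff switch between errors.As and a plain type assertion: once ruleError wraps the value with errors.WithStack (the newer side of this hunk), the returned error is a wrapper, so err.(RuleError) no longer matches; errors.As unwraps the chain. A minimal sketch of the unwrapping check (the helper name is illustrative):

func isRuleErrorWithCode(err error, code ErrorCode) bool {
	var ruleErr RuleError
	// errors.As walks the wrap chain, so it still finds the RuleError
	// underneath the errors.WithStack wrapper.
	if !errors.As(err, &ruleErr) {
		return false
	}
	return ruleErr.ErrorCode == code
}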

@@ -5,6 +5,7 @@
package blockdag

import (
	"fmt"
	"testing"
)

@@ -17,6 +18,7 @@ func TestErrorCodeStringer(t *testing.T) {
		{ErrDuplicateBlock, "ErrDuplicateBlock"},
		{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
		{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
		{ErrInvalidTime, "ErrInvalidTime"},
		{ErrTimeTooOld, "ErrTimeTooOld"},
		{ErrTimeTooNew, "ErrTimeTooNew"},
		{ErrNoParents, "ErrNoParents"},
@@ -56,8 +58,6 @@ func TestErrorCodeStringer(t *testing.T) {
		{ErrInvalidPayload, "ErrInvalidPayload"},
		{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
		{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
		{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
		{ErrOrphanBlockIsNotAllowed, "ErrOrphanBlockIsNotAllowed"},
		{0xffff, "Unknown ErrorCode (65535)"},
	}

@@ -98,3 +98,46 @@ func TestRuleError(t *testing.T) {
		}
	}
}

// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
	t.Parallel()

	tests := []struct {
		in   DeploymentError
		want string
	}{
		{
			DeploymentError(0),
			"deployment ID 0 does not exist",
		},
		{
			DeploymentError(10),
			"deployment ID 10 does not exist",
		},
		{
			DeploymentError(123),
			"deployment ID 123 does not exist",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		result := test.in.Error()
		if result != test.want {
			t.Errorf("Error #%d\n got: %s want: %s", i, result,
				test.want)
			continue
		}
	}
}

func TestAssertError(t *testing.T) {
	message := "abc 123"
	err := AssertError(message)
	expectedMessage := fmt.Sprintf("assertion failed: %s", message)
	if expectedMessage != err.Error() {
		t.Errorf("Unexpected AssertError message. "+
			"Got: %s, want: %s", err.Error(), expectedMessage)
	}
}

@@ -2,11 +2,9 @@ package blockdag_test

import (
	"fmt"
	"math"
	"strings"
	"testing"

	"github.com/pkg/errors"
	"math"
	"testing"

	"github.com/kaspanet/kaspad/util/subnetworkid"

@@ -15,10 +13,10 @@ import (

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/mining"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/wire"
)

// TestFinality checks that the finality mechanism works as expected.
@@ -41,8 +39,8 @@ import (
func TestFinality(t *testing.T) {
	params := dagconfig.SimnetParams
	params.K = 1
	params.FinalityDuration = 100 * params.TargetTimePerBlock
	dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
	params.FinalityInterval = 100
	dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
@@ -50,7 +48,7 @@ func TestFinality(t *testing.T) {
	}
	defer teardownFunc()
	buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
		msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
		msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
		if err != nil {
			return nil, err
		}
@@ -75,7 +73,7 @@ func TestFinality(t *testing.T) {
	currentNode := genesis

	// First we build a chain of params.FinalityInterval blocks for future use
	for i := uint64(0); i < dag.FinalityInterval(); i++ {
	for i := uint64(0); i < params.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -87,7 +85,7 @@ func TestFinality(t *testing.T) {
	// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
	// we expect the block with height 1 * params.FinalityInterval to be the last finality point
	currentNode = genesis
	for i := uint64(0); i < dag.FinalityInterval(); i++ {
	for i := uint64(0); i < params.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -96,7 +94,7 @@ func TestFinality(t *testing.T) {

	expectedFinalityPoint := currentNode

	for i := uint64(0); i < dag.FinalityInterval(); i++ {
	for i := uint64(0); i < params.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -125,7 +123,7 @@ func TestFinality(t *testing.T) {
		t.Errorf("NextBlockCoinbaseTransaction: %s", err)
	}
	merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
	beforeFinalityBlock := domainmessage.NewMsgBlock(&domainmessage.BlockHeader{
	beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
		Version:        0x10000000,
		ParentHashes:   []*daghash.Hash{genesis.Hash()},
		HashMerkleRoot: merkleRoot,
@@ -139,10 +137,10 @@ func TestFinality(t *testing.T) {
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	var ruleErr blockdag.RuleError
	if errors.As(err, &ruleErr) {
		if ruleErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
	rErr, ok := err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
@@ -154,17 +152,18 @@ func TestFinality(t *testing.T) {
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	if errors.As(err, &ruleErr) {
		if ruleErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
	rErr, ok = err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
	}
}

// TestFinalityInterval tests that the finality interval is
// smaller than domainmessage.MaxInvPerMsg, so when a peer receives
// smaller than wire.MaxInvPerMsg, so when a peer receives
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
@@ -176,19 +175,9 @@ func TestFinalityInterval(t *testing.T) {
		&dagconfig.SimnetParams,
	}
	for _, params := range netParams {
		func() {
			dag, teardownFunc, err := blockdag.DAGSetup("TestFinalityInterval", true, blockdag.Config{
				DAGParams: params,
			})
			if err != nil {
				t.Fatalf("Failed to setup dag instance for %s: %v", params.Name, err)
			}
			defer teardownFunc()

			if dag.FinalityInterval() > domainmessage.MaxInvPerMsg {
				t.Errorf("FinalityInterval in %s should be lower or equal to domainmessage.MaxInvPerMsg", params.Name)
			}
		}()
		if params.FinalityInterval > wire.MaxInvPerMsg {
			t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
		}
	}
}
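On the newer side of this diff the fixed params.FinalityInterval is gone: the interval is derived from a duration, which is why TestFinality sets params.FinalityDuration = 100 * params.TargetTimePerBlock to get the same 100-block interval. A sketch of the assumed relation (the accessor in the newer code is dag.FinalityInterval(); this helper name is illustrative):

func finalityIntervalSketch(finalityDuration, targetTimePerBlock time.Duration) uint64 {
	// With FinalityDuration = 100 * TargetTimePerBlock, this yields an
	// interval of 100 blocks, matching the older fixed parameter.
	return uint64(finalityDuration / targetTimePerBlock)
}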
|
||||
|
||||
@@ -197,8 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
params.EnableNonNativeSubnetworks = true
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -211,7 +199,7 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not register network: %s", err)
|
||||
}
|
||||
limit, err := dag.GasLimit(subnetworkID)
|
||||
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
|
||||
if err != nil {
|
||||
t.Fatalf("could not retrieve gas limit: %s", err)
|
||||
}
|
||||
@@ -224,7 +212,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -232,7 +220,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
block1, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -253,59 +241,48 @@ func TestChainedTransactions(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &domainmessage.TxOut{
|
||||
txOut := &wire.TxOut{
|
||||
ScriptPubKey: blockdag.OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
tx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut})
|
||||
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
|
||||
|
||||
chainedTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *tx.TxID(), Index: 0},
|
||||
chainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
|
||||
scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build public key script: %s", err)
|
||||
}
|
||||
chainedTxOut := &domainmessage.TxOut{
|
||||
chainedTxOut := &wire.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
chainedTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{chainedTxIn}, []*domainmessage.TxOut{chainedTxOut})
|
||||
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
|
||||
|
||||
block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*domainmessage.MsgTx{tx}, false)
|
||||
block2, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Manually add a chained transaction to block2
|
||||
block2.Transactions = append(block2.Transactions, chainedTx)
|
||||
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
|
||||
for i, tx := range block2.Transactions {
|
||||
block2UtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
|
||||
|
||||
//Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
|
||||
if err == nil {
|
||||
t.Errorf("ProcessBlock expected an error")
|
||||
} else {
|
||||
var ruleErr blockdag.RuleError
|
||||
if ok := errors.As(err, &ruleErr); ok {
|
||||
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
|
||||
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
|
||||
} else if rErr, ok := err.(blockdag.RuleError); ok {
|
||||
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
|
||||
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: block2 " +
|
||||
@@ -315,18 +292,18 @@ func TestChainedTransactions(t *testing.T) {
|
||||
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
|
||||
}
|
||||
|
||||
nonChainedTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
nonChainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
nonChainedTxOut := &domainmessage.TxOut{
|
||||
nonChainedTxOut := &wire.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
nonChainedTx := domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{nonChainedTxIn}, []*domainmessage.TxOut{nonChainedTxOut})
|
||||
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
|
||||
|
||||
block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*domainmessage.MsgTx{nonChainedTx}, false)
|
||||
block3, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -352,7 +329,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = math.MaxUint8
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -363,27 +340,27 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
|
||||
createBlock := func(previousBlock *util.Block) *util.Block {
|
||||
// Prepare a transaction that spends the previous block's coinbase transaction
|
||||
var txs []*domainmessage.MsgTx
|
||||
var txs []*wire.MsgTx
|
||||
if !previousBlock.IsGenesis() {
|
||||
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
|
||||
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: domainmessage.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &domainmessage.TxOut{
|
||||
txOut := &wire.TxOut{
|
||||
ScriptPubKey: blockdag.OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
txs = append(txs, domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn}, []*domainmessage.TxOut{txOut}))
|
||||
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
|
||||
}
|
||||
|
||||
// Create the block
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{previousBlock.Hash()}, txs, false)
|
||||
if err != nil {
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
|
||||
}
|
||||
@@ -422,8 +399,7 @@ func TestGasLimit(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
params.EnableNonNativeSubnetworks = true
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -438,9 +414,9 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("could not register network: %s", err)
|
||||
}
|
||||
|
||||
cbTxs := []*domainmessage.MsgTx{}
|
||||
cbTxs := []*wire.MsgTx{}
|
||||
for i := 0; i < 4; i++ {
|
||||
fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
|
||||
fundsBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -469,30 +445,30 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("Failed to build public key script: %s", err)
|
||||
}
|
||||
|
||||
tx1In := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[0].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
tx1In := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
tx1Out := &domainmessage.TxOut{
|
||||
tx1Out := &wire.TxOut{
|
||||
Value: cbTxs[0].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
tx1 := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{tx1In}, []*domainmessage.TxOut{tx1Out}, subnetworkID, 10000, []byte{})
|
||||
tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})
|
||||
|
||||
tx2In := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[1].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
tx2In := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
tx2Out := &domainmessage.TxOut{
|
||||
tx2Out := &wire.TxOut{
|
||||
Value: cbTxs[1].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
tx2 := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{tx2In}, []*domainmessage.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
|
||||
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
|
||||
|
||||
// Here we check that we can't process a block that has transactions that exceed the gas limit
|
||||
overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1, tx2}, true)
|
||||
overLimitBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -500,11 +476,11 @@ func TestGasLimit(t *testing.T) {
|
||||
if err == nil {
|
||||
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
|
||||
}
|
||||
var ruleErr blockdag.RuleError
|
||||
if !errors.As(err, &ruleErr) {
|
||||
rErr, ok := err.(blockdag.RuleError)
|
||||
if !ok {
|
||||
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
|
||||
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
|
||||
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
|
||||
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
|
||||
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: overLimitBlock " +
|
||||
@@ -514,20 +490,20 @@ func TestGasLimit(t *testing.T) {
|
||||
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
|
||||
}
|
||||
|
||||
overflowGasTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[2].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
overflowGasTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
overflowGasTxOut := &domainmessage.TxOut{
|
||||
overflowGasTxOut := &wire.TxOut{
|
||||
Value: cbTxs[2].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
overflowGasTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{overflowGasTxIn}, []*domainmessage.TxOut{overflowGasTxOut},
|
||||
overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
|
||||
subnetworkID, math.MaxUint64, []byte{})
|
||||
|
||||
// Here we check that we can't process a block that its transactions' gas overflows uint64
|
||||
overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1, overflowGasTx}, true)
|
||||
overflowGasBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -535,33 +511,30 @@ func TestGasLimit(t *testing.T) {
|
||||
if err == nil {
|
||||
t.Fatalf("ProcessBlock expected to have an error")
|
||||
}
|
||||
if !errors.As(err, &ruleErr) {
|
||||
rErr, ok = err.(blockdag.RuleError)
|
||||
if !ok {
|
||||
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
|
||||
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
|
||||
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
|
||||
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
|
||||
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: overflowGasBlock " +
|
||||
"is too far in the future")
|
||||
}
|
||||
|
||||
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
|
||||
nonExistentSubnetworkTxIn := &domainmessage.TxIn{
|
||||
PreviousOutpoint: *domainmessage.NewOutpoint(cbTxs[3].TxID(), 0),
|
||||
Sequence: domainmessage.MaxTxInSequenceNum,
|
||||
nonExistentSubnetworkTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
SignatureScript: signatureScript,
|
||||
}
|
||||
nonExistentSubnetworkTxOut := &domainmessage.TxOut{
|
||||
nonExistentSubnetworkTxOut := &wire.TxOut{
|
||||
Value: cbTxs[3].TxOut[0].Value,
|
||||
ScriptPubKey: scriptPubKey,
|
||||
}
|
||||
nonExistentSubnetworkTx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{nonExistentSubnetworkTxIn},
|
||||
[]*domainmessage.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
|
||||
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
|
||||
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
|
||||
|
||||
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
|
||||
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -570,19 +543,12 @@ func TestGasLimit(t *testing.T) {
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
|
||||
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
|
||||
nonExistentSubnetwork, nonExistentSubnetwork)
|
||||
if strings.Contains(err.Error(), expectedErrStr) {
|
||||
if err.Error() != expectedErrStr {
|
||||
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
|
||||
}
|
||||
|
||||
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
|
||||
validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*domainmessage.MsgTx{tx1}, true)
|
||||
validBlock, err := mining.PrepareBlockForTest(dag, ¶ms, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
	newNode.selectedParent = newNode.parents.bluest()
	newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
	newNode.bluesAnticoneSizes[*newNode.selectedParent.hash] = 0
	newNode.blues = []*blockNode{newNode.selectedParent}
	selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
	if err != nil {
@@ -57,7 +57,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
			// newNode is always in the future of blueCandidate, so there's
			// no point in checking it.
			if chainBlock != newNode {
				if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
				if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil {
					return nil, err
				} else if isAncestorOfBlueCandidate {
					break
@@ -66,7 +66,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo

			for _, block := range chainBlock.blues {
				// Skip blocks that exist in the past of blueCandidate.
				if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
				if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil {
					return nil, err
				} else if isAncestorOfBlueCandidate {
					continue
@@ -78,13 +78,13 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
				}
				candidateAnticoneSize++

				if candidateAnticoneSize > dag.Params.K {
				if candidateAnticoneSize > dag.dagParams.K {
					// k-cluster violation: The candidate's blue anticone exceeded k
					possiblyBlue = false
					break
				}

				if candidateBluesAnticoneSizes[block] == dag.Params.K {
				if candidateBluesAnticoneSizes[block] == dag.dagParams.K {
					// k-cluster violation: A block in candidate's blue anticone already
					// has k blue blocks in its own anticone
					possiblyBlue = false
@@ -93,7 +93,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo

				// This is a sanity check that validates that a blue
				// block's blue anticone is not already larger than K.
				if candidateBluesAnticoneSizes[block] > dag.Params.K {
				if candidateBluesAnticoneSizes[block] > dag.dagParams.K {
					return nil, errors.New("found blue anticone size larger than k")
				}
			}
@@ -102,14 +102,14 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
		if possiblyBlue {
			// No k-cluster violation found, we can now set the candidate block as blue
			newNode.blues = append(newNode.blues, blueCandidate)
			newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
			newNode.bluesAnticoneSizes[*blueCandidate.hash] = candidateAnticoneSize
			for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
				newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
				newNode.bluesAnticoneSizes[*blue.hash] = blueAnticoneSize + 1
			}

			// The maximum length of node.blues can be K+1 because
			// it contains the selected parent.
			if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
			if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
				break
			}
		}
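The changed pairs above swap the `bluesAnticoneSizes` key from a `*blockNode` pointer to the block's hash value. A minimal sketch of why that distinction matters, assuming only that the hash is a comparable array type like `daghash.Hash`; the `node`/`hash` names below are illustrative, not kaspad's:

```go
package main

import "fmt"

// hash stands in for daghash.Hash: an array type, so map lookups
// compare contents rather than identity (assumption for illustration).
type hash [4]byte

type node struct{ h hash }

func main() {
	a := &node{h: hash{1}}
	b := &node{h: hash{1}} // same hash, but a distinct allocation

	byPointer := map[*node]int{a: 7}
	byHash := map[hash]int{a.h: 7}

	_, okPtr := byPointer[b] // false: pointer keys miss equal nodes
	_, okHash := byHash[b.h] // true: hash keys match by content
	fmt.Println(okPtr, okHash)
}
```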
@@ -126,9 +126,9 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
	anticoneSet := newBlockSet()
	anticoneSet := newSet()
	var anticoneSlice []*blockNode
	selectedParentPast := newBlockSet()
	selectedParentPast := newSet()
	var queue []*blockNode
	// Queueing all parents (other than the selected parent itself) for processing.
	for parent := range node.parents {
@@ -148,7 +148,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
		if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
			continue
		}
		isAncestorOfSelectedParent, err := dag.isInPast(parent, node.selectedParent)
		isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent)
		if err != nil {
			return nil, err
		}
@@ -168,7 +168,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
// Expects 'block' to be in the blue set of 'context'
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
	for current := context; current != nil; current = current.selectedParent {
		if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
		if blueAnticoneSize, ok := current.bluesAnticoneSizes[*block.hash]; ok {
			return blueAnticoneSize, nil
		}
	}
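`blueAnticoneSize` resolves a size by climbing `context`'s selected-parent chain until some ancestor has recorded an entry for `block`. A toy model of that walk, with string names standing in for hashes (illustrative types only, not the real kaspad ones), including the "is not in blue set of" failure mode the tests below rely on:

```go
package main

import (
	"errors"
	"fmt"
)

// Toy analogues of the blockNode fields used by blueAnticoneSize;
// the names are illustrative, not the real kaspad types.
type node struct {
	name           string
	selectedParent *node
	anticoneSizes  map[string]int // keyed by block name instead of hash
}

// blueAnticoneSize walks up the selected-parent chain of context and
// returns the first recorded anticone size for block, mirroring the
// loop in the diff above.
func blueAnticoneSize(block string, context *node) (int, error) {
	for cur := context; cur != nil; cur = cur.selectedParent {
		if size, ok := cur.anticoneSizes[block]; ok {
			return size, nil
		}
	}
	return 0, errors.New(block + " is not in blue set of " + context.name)
}

func main() {
	genesis := &node{name: "G", anticoneSizes: map[string]int{"G": 0}}
	tip := &node{name: "B", selectedParent: genesis,
		anticoneSizes: map[string]int{"B": 0}}

	fmt.Println(blueAnticoneSize("G", tip)) // 0 <nil>: found on the parent
	fmt.Println(blueAnticoneSize("X", tip)) // error: not in any blue set
}
```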

@@ -2,15 +2,11 @@ package blockdag

import (
	"fmt"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
)

type testBlockData struct {
@@ -34,7 +30,7 @@ func TestGHOSTDAG(t *testing.T) {
	}{
		{
			k:            3,
			expectedReds: []string{"F", "G", "H", "I", "N", "P"},
			expectedReds: []string{"F", "G", "H", "I", "O", "P"},
			dagData: []*testBlockData{
				{
					parents: []string{"A"},
@@ -167,7 +163,7 @@ func TestGHOSTDAG(t *testing.T) {
					id:                     "T",
					expectedScore:          13,
					expectedSelectedParent: "S",
					expectedBlues:          []string{"S", "O", "Q"},
					expectedBlues:          []string{"S", "N", "Q"},
				},
			},
		},
@@ -177,7 +173,7 @@ func TestGHOSTDAG(t *testing.T) {
		func() {
			resetExtraNonceForTest()
			dagParams.K = test.k
			dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
			dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
				DAGParams: &dagParams,
			})
			if err != nil {
@@ -216,10 +212,7 @@ func TestGHOSTDAG(t *testing.T) {
				t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
			}

			node, ok := dag.index.LookupNode(utilBlock.Hash())
			if !ok {
				t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
			}
			node := dag.index.LookupNode(utilBlock.Hash())

			blockByIDMap[blockData.id] = node
			idByBlockMap[node] = blockData.id
@@ -283,104 +276,3 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
	}
	return true
}

func TestBlueAnticoneSizeErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Prepare a block chain with size K beginning with the genesis block
	currentBlockA := dag.Params.GenesisBlock
	for i := dagconfig.KType(0); i < dag.Params.K; i++ {
		newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
		currentBlockA = newBlock
	}

	// Prepare another block chain with size K beginning with the genesis block
	currentBlockB := dag.Params.GenesisBlock
	for i := dagconfig.KType(0); i < dag.Params.K; i++ {
		newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
		currentBlockB = newBlock
	}

	// Get references to the tips of the two chains
	blockNodeA, ok := dag.index.LookupNode(currentBlockA.BlockHash())
	if !ok {
		t.Fatalf("block %s does not exist in the DAG", currentBlockA.BlockHash())
	}

	blockNodeB, ok := dag.index.LookupNode(currentBlockB.BlockHash())
	if !ok {
		t.Fatalf("block %s does not exist in the DAG", currentBlockB.BlockHash())
	}

	// Try getting the blueAnticoneSize between them. Since the two
	// blocks are not in the anticones of each other, this should fail.
	_, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
	if err == nil {
		t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
	}
	expectedErrSubstring := "is not in blue set of"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}

func TestGHOSTDAGErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Add two child blocks to the genesis
	block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
	block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)

	// Add a child block to the previous two blocks
	block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)

	// Clear the reachability store
	dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{}

	dbTx, err := dag.databaseContext.NewTx()
	if err != nil {
		t.Fatalf("NewTx: %s", err)
	}
	defer dbTx.RollbackUnlessClosed()

	err = dbaccess.ClearReachabilityData(dbTx)
	if err != nil {
		t.Fatalf("ClearReachabilityData: %s", err)
	}

	err = dbTx.Commit()
	if err != nil {
		t.Fatalf("Commit: %s", err)
	}

	// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
	// reachability data, so we expect it to fail.
	blockNode3, ok := dag.index.LookupNode(block3.BlockHash())
	if !ok {
		t.Fatalf("block %s does not exist in the DAG", block3.BlockHash())
	}
	_, err = dag.ghostdag(blockNode3)
	if err == nil {
		t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
	}
	expectedErrSubstring := "couldn't find reachability data"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}

@@ -1,7 +1,7 @@
indexers
========

[](https://choosealicense.com/licenses/isc/)
[](http://copyfree.org)
[](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)

Package indexers implements optional block chain indexes.

@@ -3,20 +3,31 @@ package indexers
import (
	"bytes"
	"encoding/gob"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
)

const (
	// acceptanceIndexName is the human-readable name for the index.
	acceptanceIndexName = "acceptance index"
)

var (
	// acceptanceIndexKey is the key of the acceptance index and the db bucket used
	// to house it.
	acceptanceIndexKey = []byte("acceptanceidx")
)

// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
	dag             *blockdag.BlockDAG
	databaseContext *dbaccess.DatabaseContext
	db  database.DB
	dag *blockdag.BlockDAG
}

// Ensure the AcceptanceIndex type implements the Indexer interface.
@@ -32,88 +43,127 @@ func NewAcceptanceIndex() *AcceptanceIndex {
	return &AcceptanceIndex{}
}

// DropAcceptanceIndex drops the acceptance index.
func DropAcceptanceIndex(databaseContext *dbaccess.DatabaseContext) error {
	dbTx, err := databaseContext.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()
// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}

	err = dbaccess.DropAcceptanceIndex(dbTx)
	if err != nil {
		return err
	}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
	return acceptanceIndexKey
}

	return dbTx.Commit()
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
	return acceptanceIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
	return err
}

// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
	idx.db = db
	idx.dag = dag
	idx.databaseContext = databaseContext
	return idx.recover()
}

// recover attempts to insert any data that's missing from the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) recover() error {
	return idx.dag.ForEachHash(func(hash daghash.Hash) error {
		dbTx, err := idx.databaseContext.NewTx()
		if err != nil {
			return err
		}
		defer dbTx.RollbackUnlessClosed()

		exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
		if err != nil {
			return err
		}
		if exists {
			return nil
		}
		txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
		if err != nil {
			return err
		}
		err = idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
		if err != nil {
			return err
		}

		return dbTx.Commit()
	})
	return nil
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
	if err != nil {
		return err
	}
	return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
	return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}

// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
	serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(idx.databaseContext, blockHash)
	var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
		return err
	})
	if err != nil {
		return nil, err
	}
	return txsAcceptanceData, nil
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
		hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
		if err != nil {
			return err
		}
		txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
		if err != nil {
			return err
		}
		err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
		if err != nil {
			return err
		}
	}
	return nil
}

func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
	if err != nil {
		return err
	}

	bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
}

func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
	hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {

	blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
	if err != nil {
		return nil, err
	}

	return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}

func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
	blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
	serializedBlockID := blockdag.SerializeBlockID(blockID)
	bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
	serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
	if serializedTxsAcceptanceData == nil {
		return nil, errors.Errorf("no entry in the acceptance index for block id %d", blockID)
	}

	return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}

type serializableTxAcceptanceData struct {
	MsgTx      domainmessage.MsgTx
	MsgTx      wire.MsgTx
	IsAccepted bool
}
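Given the `bytes` and `encoding/gob` imports above, the serialize/deserialize helpers presumably gob-encode these serializable structs. A self-contained round-trip sketch with a simplified stand-in struct (the real one embeds a full `MsgTx`):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// txAcceptance is a simplified stand-in for serializableTxAcceptanceData.
type txAcceptance struct {
	TxID       string
	IsAccepted bool
}

func main() {
	in := []txAcceptance{{"aa11", true}, {"bb22", false}}

	// Encode to a byte slice, as a serialize helper would.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	// Decode it back, as the matching deserialize helper would.
	var out []txAcceptance
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // [{TxID:aa11 IsAccepted:true} {TxID:bb22 IsAccepted:false}]
}
```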

@@ -1,6 +1,13 @@
package indexers

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
	"io"
	"io/ioutil"
	"os"
@@ -8,32 +15,24 @@ import (
	"reflect"
	"syscall"
	"testing"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/pkg/errors"
)

func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
	// Create test data
	hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	txIn1 := &domainmessage.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: domainmessage.Outpoint{Index: 1}, Sequence: 0}
	txIn2 := &domainmessage.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: domainmessage.Outpoint{Index: 2}, Sequence: 0}
	txOut1 := &domainmessage.TxOut{ScriptPubKey: []byte{1}, Value: 10}
	txOut2 := &domainmessage.TxOut{ScriptPubKey: []byte{2}, Value: 20}
	txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
	txIn2 := &wire.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: wire.Outpoint{Index: 2}, Sequence: 0}
	txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
	txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
	blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
		BlockHash: *hash,
		TxAcceptanceData: []blockdag.TxAcceptanceData{
			{
				Tx:         util.NewTx(domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn1}, []*domainmessage.TxOut{txOut1})),
				Tx:         util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
				IsAccepted: true,
			},
			{
				Tx:         util.NewTx(domainmessage.NewNativeMsgTx(domainmessage.TxVersion, []*domainmessage.TxIn{txIn2}, []*domainmessage.TxOut{txOut2})),
				Tx:         util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
				IsAccepted: false,
			},
		},
@@ -97,18 +96,18 @@ func TestAcceptanceIndexRecover(t *testing.T) {
	}
	defer os.RemoveAll(db1Path)

	databaseContext1, err := dbaccess.New(db1Path)
	db1, err := database.Create("ffldb", db1Path, params.Net)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
	}

	db1Config := blockdag.Config{
		IndexManager:    db1IndexManager,
		DAGParams:       params,
		DatabaseContext: databaseContext1,
		IndexManager: db1IndexManager,
		DAGParams:    params,
		DB:           db1,
	}

	db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
	db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
@@ -131,6 +130,11 @@ func TestAcceptanceIndexRecover(t *testing.T) {
		}
	}

	err = db1.FlushCache()
	if err != nil {
		t.Fatalf("Error flushing database to disk: %s", err)
	}

	db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
@@ -162,21 +166,17 @@ func TestAcceptanceIndexRecover(t *testing.T) {
		t.Fatalf("Error fetching acceptance data: %s", err)
	}

	err = databaseContext1.Close()
	db2, err := database.Open("ffldb", db2Path, params.Net)
	if err != nil {
		t.Fatalf("Error closing the database: %s", err)
	}
	databaseContext2, err := dbaccess.New(db2Path)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
		t.Fatalf("Error opening database: %s", err)
	}

	db2Config := blockdag.Config{
		DAGParams:       params,
		DatabaseContext: databaseContext2,
		DAGParams: params,
		DB:        db2,
	}

	db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
	db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
@@ -199,6 +199,10 @@ func TestAcceptanceIndexRecover(t *testing.T) {
		}
	}

	err = db2.FlushCache()
	if err != nil {
		t.Fatalf("Error flushing database to disk: %s", err)
	}
	db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
@@ -209,24 +213,20 @@ func TestAcceptanceIndexRecover(t *testing.T) {
		t.Fatalf("copyDirectory: %s", err)
	}

	err = databaseContext2.Close()
	db3, err := database.Open("ffldb", db3Path, params.Net)
	if err != nil {
		t.Fatalf("Error closing the database: %s", err)
	}
	databaseContext3, err := dbaccess.New(db3Path)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
		t.Fatalf("Error opening database: %s", err)
	}

	db3AcceptanceIndex := NewAcceptanceIndex()
	db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
	db3Config := blockdag.Config{
		IndexManager:    db3IndexManager,
		DAGParams:       params,
		DatabaseContext: databaseContext3,
		IndexManager: db3IndexManager,
		DAGParams:    params,
		DB:           db3,
	}

	_, teardown, err = blockdag.DAGSetup("", false, db3Config)
	_, teardown, err = blockdag.DAGSetup("", db3Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
@@ -298,16 +298,16 @@ func copyDirectory(scrDir, dest string) error {
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
	out, err := os.Create(dstFile)
	defer out.Close()
	if err != nil {
		return err
	}
	defer out.Close()

	in, err := os.Open(srcFile)
	defer in.Close()
	if err != nil {
		return err
	}
	defer in.Close()

	_, err = io.Copy(out, in)
	if err != nil {
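The `copyFile` hunk above moves each `defer ...Close()` below its error check, so a failed `os.Create` or `os.Open` never registers a deferred `Close` on an unusable handle. The corrected shape in full (the file names in `main` are hypothetical):

```go
package main

import (
	"io"
	"os"
)

// copyFile demonstrates the defer placement fixed in the hunk above:
// Close is deferred only after the error check succeeds.
func copyFile(srcFile, dstFile string) error {
	out, err := os.Create(dstFile)
	if err != nil {
		return err
	}
	defer out.Close()

	in, err := os.Open(srcFile)
	if err != nil {
		return err
	}
	defer in.Close()

	_, err = io.Copy(out, in)
	return err
}

func main() {
	_ = copyFile("in.txt", "out.txt") // usage sketch
}
```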
blockdag/indexers/addrindex.go (new file, 902 lines)
@@ -0,0 +1,902 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"fmt"
	"github.com/pkg/errors"
	"sync"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

const (
	// addrIndexName is the human-readable name for the index.
	addrIndexName = "address index"

	// level0MaxEntries is the maximum number of transactions that are
	// stored in level 0 of an address index entry. Subsequent levels store
	// 2^n * level0MaxEntries entries, or, in other words, double the
	// maximum of the previous level.
	level0MaxEntries = 8

	// addrKeySize is the number of bytes an address key consumes in the
	// index. It consists of 1 byte address type + 20 bytes hash160.
	addrKeySize = 1 + 20

	// levelKeySize is the number of bytes a level key in the address index
	// consumes. It consists of the address key + 1 byte for the level.
	levelKeySize = addrKeySize + 1

	// levelOffset is the offset in the level key which identifies the level.
	levelOffset = levelKeySize - 1

	// addrKeyTypePubKeyHash is the address type in an address key which
	// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
	// This is done because both are identical for the purposes of the
	// address index.
	addrKeyTypePubKeyHash = 0

	// addrKeyTypeScriptHash is the address type in an address key which
	// represents a pay-to-script-hash address. This is necessary because
	// the hash of a pubkey address might be the same as that of a script
	// hash.
	addrKeyTypeScriptHash = 1

	// Size of a transaction entry. It consists of 8 bytes block id + 4
	// bytes offset + 4 bytes length.
	txEntrySize = 8 + 4 + 4
)

var (
	// addrIndexKey is the key of the address index and the db bucket used
	// to house it.
	addrIndexKey = []byte("txbyaddridx")

	// errUnsupportedAddressType is an error that is used to signal an
	// unsupported address type has been used.
	errUnsupportedAddressType = errors.New("address type is not supported " +
		"by the address index")
)

// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockDAG to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockDAG. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks due to the fact the spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
//   <addr type><addr hash><level>
//
//   Field           Type      Size
//   addr type       uint8     1 byte
//   addr hash       hash160   20 bytes
//   level           uint8     1 byte
//   -----
//   Total: 22 bytes
//
// The serialized value format is:
//
//   [<block id><start offset><tx length>,...]
//
//   Field           Type      Size
//   block id        uint64    8 bytes
//   start offset    uint32    4 bytes
//   tx length       uint32    4 bytes
//   -----
//   Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
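A quick sketch of the level arithmetic described in the comment block above: level n holds 2^n * level0MaxEntries entries, and an entry can only reach level n after all lower levels have filled at least once. This mirrors the `maxEntriesForLevel` and `minEntriesToReachLevel` helpers defined later in this file:

```go
package main

import "fmt"

const level0MaxEntries = 8 // value from the constants above

func main() {
	capacity := level0MaxEntries
	cumulative := 0
	for level := 0; level <= 4; level++ {
		// Minimum entries required before any entry can reach this level.
		minToReach := cumulative + 1
		fmt.Printf("level %d: capacity %d, min entries to reach %d\n",
			level, capacity, minToReach)
		cumulative += capacity
		capacity *= 2 // each level doubles the previous one
	}
	// Prints capacities 8, 16, 32, 64, 128 and minimums 1, 9, 25, 57, 121.
}
```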

// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)

// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
	// Serialize the entry.
	serialized := make([]byte, 16)
	byteOrder.PutUint64(serialized, blockID)
	byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
	byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
	return serialized
}

// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
	// Ensure there are enough bytes to decode.
	if len(serialized) < txEntrySize {
		return errDeserialize("unexpected end of data")
	}

	hash, err := fetchBlockHash(serialized[0:8])
	if err != nil {
		return err
	}
	region.Hash = hash
	region.Offset = byteOrder.Uint32(serialized[8:12])
	region.Len = byteOrder.Uint32(serialized[12:16])
	return nil
}

// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
	var key [levelKeySize]byte
	copy(key[:], addrKey[:])
	key[levelOffset] = level
	return key
}

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
	// Start with level 0 and its initial max number of entries.
	curLevel := uint8(0)
	maxLevelBytes := level0MaxEntries * txEntrySize

	// Simply append the new entry to level 0 and return now when it will
	// fit. This is the most common path.
	newData := serializeAddrIndexEntry(blockID, txLoc)
	level0Key := keyForLevel(addrKey, 0)
	level0Data := bucket.Get(level0Key[:])
	if len(level0Data)+len(newData) <= maxLevelBytes {
		mergedData := newData
		if len(level0Data) > 0 {
			mergedData = make([]byte, len(level0Data)+len(newData))
			copy(mergedData, level0Data)
			copy(mergedData[len(level0Data):], newData)
		}
		return bucket.Put(level0Key[:], mergedData)
	}

	// At this point, level 0 is full, so merge each level into higher
	// levels as many times as needed to free up level 0.
	prevLevelData := level0Data
	for {
		// Each new level holds twice as much as the previous one.
		curLevel++
		maxLevelBytes *= 2

		// Move to the next level as long as the current level is full.
		curLevelKey := keyForLevel(addrKey, curLevel)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == maxLevelBytes {
			prevLevelData = curLevelData
			continue
		}

		// The current level has room for the data in the previous one,
		// so merge the data from previous level into it.
		mergedData := prevLevelData
		if len(curLevelData) > 0 {
			mergedData = make([]byte, len(curLevelData)+
				len(prevLevelData))
			copy(mergedData, curLevelData)
			copy(mergedData[len(curLevelData):], prevLevelData)
		}
		err := bucket.Put(curLevelKey[:], mergedData)
		if err != nil {
			return err
		}

		// Move all of the levels before the previous one up a level.
		for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
			mergeLevelKey := keyForLevel(addrKey, mergeLevel)
			prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
			prevData := bucket.Get(prevLevelKey[:])
			err := bucket.Put(mergeLevelKey[:], prevData)
			if err != nil {
				return err
			}
		}
		break
	}

	// Finally, insert the new entry into level 0 now that it is empty.
	return bucket.Put(level0Key[:], newData)
}
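A runnable in-memory analogue of the merge logic above, using int slices as levels and a tiny capacity so the cascade is visible. This is a sketch of the scheme, not the byte-oriented production code:

```go
package main

import "fmt"

const level0Max = 2 // tiny level-0 capacity so the cascade shows quickly

// put mirrors dbPutAddrIndexEntry: append to level 0 if it fits,
// otherwise cascade the last full level into the first level with room
// (level n holds 2^n * level0Max entries), shift the levels below the
// merge point up by one, and start level 0 over with the new entry.
func put(levels [][]int, entry int) [][]int {
	if len(levels[0]) < level0Max {
		levels[0] = append(levels[0], entry)
		return levels
	}
	max := level0Max
	prev := levels[0]
	for lvl := 1; ; lvl++ {
		max *= 2
		if lvl == len(levels) {
			levels = append(levels, nil)
		}
		if len(levels[lvl]) == max { // full: keep cascading upward
			prev = levels[lvl]
			continue
		}
		// Room here: existing (older) data stays in front, prev after it.
		levels[lvl] = append(append([]int{}, levels[lvl]...), prev...)
		// Shift every level below the merge point up by one.
		for l := lvl - 1; l > 0; l-- {
			levels[l] = levels[l-1]
		}
		break
	}
	levels[0] = []int{entry}
	return levels
}

func main() {
	levels := [][]int{nil}
	for entry := 1; entry <= 7; entry++ {
		levels = put(levels, entry)
		fmt.Println(entry, levels)
	}
	// After 7 inserts: [[7] [5 6] [1 2 3 4]]. Lower levels hold newer
	// entries; within each level, entries run oldest to newest.
}
```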

// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key, plus the number of entries actually skipped, which
// can be fewer than requested when the index holds fewer total entries than
// the requested number to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
	// When the reverse flag is not set, all levels need to be fetched
	// because numToSkip and numRequested are counted from the oldest
	// transactions (highest level) and thus the total count is needed.
	// However, when the reverse flag is set, only enough records to satisfy
	// the requested amount are needed.
	var level uint8
	var serialized []byte
	for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if levelData == nil {
			// Stop when there are no more levels.
			break
		}

		// Higher levels contain older transactions, so prepend them.
		prepended := make([]byte, len(serialized)+len(levelData))
		copy(prepended, levelData)
		copy(prepended[len(levelData):], serialized)
		serialized = prepended
		level++
	}

	// When the requested number of entries to skip is larger than the
	// number available, skip them all and return now with the actual number
	// skipped.
	numEntries := uint32(len(serialized) / txEntrySize)
	if numToSkip >= numEntries {
		return nil, numEntries, nil
	}

	// Nothing more to do when there are no requested entries.
	if numRequested == 0 {
		return nil, numToSkip, nil
	}

	// Limit the number to load based on the number of available entries,
	// the number to skip, and the number requested.
	numToLoad := numEntries - numToSkip
	if numToLoad > numRequested {
		numToLoad = numRequested
	}

	// Start the offset after all skipped entries and load the calculated
	// number.
	results := make([]database.BlockRegion, numToLoad)
	for i := uint32(0); i < numToLoad; i++ {
		// Calculate the read offset according to the reverse flag.
		var offset uint32
		if reverse {
			offset = (numEntries - numToSkip - i - 1) * txEntrySize
		} else {
			offset = (numToSkip + i) * txEntrySize
		}

		// Deserialize and populate the result.
		err := deserializeAddrIndexEntry(serialized[offset:],
			&results[i], fetchBlockHash)
		if err != nil {
			// Ensure any deserialization errors are returned as
			// database corruption errors.
			if isDeserializeErr(err) {
				err = database.Error{
					ErrorCode: database.ErrCorruption,
					Description: fmt.Sprintf("failed to "+
						"deserialize address index "+
						"for key %x: %s", addrKey, err),
				}
			}

			return nil, 0, err
		}
	}

	return results, numToSkip, nil
}
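The offset arithmetic in the loop above is easier to see with entry indexes instead of byte offsets. A sketch of which positions in the oldest-to-newest list forward and reverse paging visit:

```go
package main

import "fmt"

// pageIndexes mirrors the offset computation above, but returns entry
// indexes rather than byte offsets (txEntrySize is factored out).
func pageIndexes(numEntries, numToSkip, numToLoad uint32, reverse bool) []uint32 {
	idxs := make([]uint32, 0, numToLoad)
	for i := uint32(0); i < numToLoad; i++ {
		if reverse {
			idxs = append(idxs, numEntries-numToSkip-i-1)
		} else {
			idxs = append(idxs, numToSkip+i)
		}
	}
	return idxs
}

func main() {
	// 10 entries total, skip 2, load 3.
	fmt.Println(pageIndexes(10, 2, 3, false)) // [2 3 4]: oldest first
	fmt.Println(pageIndexes(10, 2, 3, true))  // [7 6 5]: newest first
}
```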

// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
	maxEntriesForLevel := level0MaxEntries
	minRequired := 1
	for l := uint8(1); l <= level; l++ {
		minRequired += maxEntriesForLevel
		maxEntriesForLevel *= 2
	}
	return minRequired
}

// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
	numEntries := level0MaxEntries
	for l := level; l > 0; l-- {
		numEntries *= 2
	}
	return numEntries
}

// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
	// Nothing to do if no entries are being deleted.
	if count <= 0 {
		return nil
	}

	// Make use of a local map to track pending updates and define a closure
	// to apply it to the database. This is done in order to reduce the
	// number of database reads and because there is more than one exit
	// path that needs to apply the updates.
	pendingUpdates := make(map[uint8][]byte)
	applyPending := func() error {
		for level, data := range pendingUpdates {
			curLevelKey := keyForLevel(addrKey, level)
			if len(data) == 0 {
				err := bucket.Delete(curLevelKey[:])
				if err != nil {
					return err
				}
				continue
			}
			err := bucket.Put(curLevelKey[:], data)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Loop forwards through the levels while removing entries until the
	// specified number has been removed. This will potentially result in
	// entirely empty lower levels which will be backfilled below.
	var highestLoadedLevel uint8
	numRemaining := count
	for level := uint8(0); numRemaining > 0; level++ {
		// Load the data for the level from the database.
		curLevelKey := keyForLevel(addrKey, level)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == 0 && numRemaining > 0 {
			return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
				"not enough entries for address key %x to "+
				"delete %d entries", addrKey, count))
		}
		pendingUpdates[level] = curLevelData
		highestLoadedLevel = level

		// Delete the entire level as needed.
		numEntries := len(curLevelData) / txEntrySize
		if numRemaining >= numEntries {
			pendingUpdates[level] = nil
			numRemaining -= numEntries
			continue
		}

		// Remove remaining entries to delete from the level.
		offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
		pendingUpdates[level] = curLevelData[:offsetEnd]
		break
	}

	// When level 0 still has entries after the removal, there is nothing
	// left to do other than updating the database.
	if len(pendingUpdates[0]) != 0 {
		return applyPending()
	}

	// At this point there are one or more empty levels before the current
	// level which need to be backfilled and the current level might have
	// had some entries deleted from it as well. Since all levels after
	// level 0 are required to either be empty, half full, or completely
	// full, the current level must be adjusted accordingly by backfilling
	// each previous level in a way which satisfies the requirements. Any
	// entries that are left are assigned to level 0 after the loop as they
	// are guaranteed to fit by the logic in the loop. In other words, this
	// effectively squashes all remaining entries in the current level into
	// the lowest possible levels while following the level rules.
	//
	// Note that the level after the current level might also have entries
	// and gaps are not allowed, so this also keeps track of the lowest
	// empty level so the code below knows how far to backfill in case it is
	// required.
	lowestEmptyLevel := uint8(255)
	curLevelData := pendingUpdates[highestLoadedLevel]
	curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
	for level := highestLoadedLevel; level > 0; level-- {
		// When there are not enough entries left in the current level
		// for the number that would be required to reach it, clear the
		// current level, which effectively moves them all up to the
		// previous level on the next iteration. Otherwise, there are
		// sufficient entries, so update the current level to
		// contain as many entries as possible while still leaving
		// enough remaining entries required to reach the level.
		numEntries := len(curLevelData) / txEntrySize
		prevLevelMaxEntries := curLevelMaxEntries / 2
		minPrevRequired := minEntriesToReachLevel(level - 1)
		if numEntries < prevLevelMaxEntries+minPrevRequired {
			lowestEmptyLevel = level
			pendingUpdates[level] = nil
		} else {
			// This level can only be completely full or half full,
			// so choose the appropriate offset to ensure enough
			// entries remain to reach the level.
			var offset int
			if numEntries-curLevelMaxEntries >= minPrevRequired {
				offset = curLevelMaxEntries * txEntrySize
			} else {
				offset = prevLevelMaxEntries * txEntrySize
			}
			pendingUpdates[level] = curLevelData[:offset]
			curLevelData = curLevelData[offset:]
		}

		curLevelMaxEntries = prevLevelMaxEntries
	}
	pendingUpdates[0] = curLevelData
	if len(curLevelData) == 0 {
		lowestEmptyLevel = 0
	}

	// When the highest loaded level is empty, it's possible the level after
	// it still has data and thus that data needs to be backfilled as well.
	for len(pendingUpdates[highestLoadedLevel]) == 0 {
		// When the next level is empty too, there is no data left to
		// continue backfilling, so there is nothing left to do.
		// Otherwise, populate the pending updates map with the newly
		// loaded data and update the highest loaded level accordingly.
		level := highestLoadedLevel + 1
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if len(levelData) == 0 {
			break
		}
		pendingUpdates[level] = levelData
		highestLoadedLevel = level

		// At this point the highest level is not empty, but it might
		// be half full. When that is the case, move it up a level to
		// simplify the code below which backfills all lower levels that
		// are still empty. This also means the current level will be
		// empty, so the loop will perform another iteration to
		// potentially backfill this level with data from the next one.
		curLevelMaxEntries := maxEntriesForLevel(level)
		if len(levelData)/txEntrySize != curLevelMaxEntries {
			pendingUpdates[level] = nil
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// Backfill all lower levels that are still empty by iteratively
		// halving the data until the lowest empty level is filled.
		for level > lowestEmptyLevel {
			offset := (curLevelMaxEntries / 2) * txEntrySize
			pendingUpdates[level] = levelData[:offset]
			levelData = levelData[offset:]
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// The lowest possible empty level is now the highest loaded
		// level.
		lowestEmptyLevel = highestLoadedLevel
	}

	// Apply the pending updates.
	return applyPending()
}
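The backfilling above exists to restore the invariant the comments state: every level after 0 must be empty, exactly half full, or exactly full. A small checker sketch for that rule, with entry slices standing in for the serialized byte levels:

```go
package main

import "fmt"

const level0MaxEntries = 8 // value from the constants above

// checkLevelInvariant verifies the rule the removal code relies on:
// every level after 0 is empty, exactly half full, or exactly full.
func checkLevelInvariant(levels [][]int) error {
	max := level0MaxEntries
	for lvl := 1; lvl < len(levels); lvl++ {
		max *= 2
		n := len(levels[lvl])
		if n != 0 && n != max/2 && n != max {
			return fmt.Errorf("level %d holds %d entries; want 0, %d or %d",
				lvl, n, max/2, max)
		}
	}
	return nil
}

func main() {
	ok := [][]int{make([]int, 3), make([]int, 8), make([]int, 32)}
	fmt.Println(checkLevelInvariant(ok)) // <nil>: 8 is half of 16, 32 is full

	bad := [][]int{nil, make([]int, 5)}
	fmt.Println(checkLevelInvariant(bad)) // error: 5 is neither 0, 8 nor 16
}
```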

// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
	switch addr := addr.(type) {
	case *util.AddressPubKeyHash:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHash
		copy(result[1:], addr.Hash160()[:])
		return result, nil

	case *util.AddressScriptHash:
		var result [addrKeySize]byte
		result[0] = addrKeyTypeScriptHash
		copy(result[1:], addr.Hash160()[:])
		return result, nil
	}

	return [addrKeySize]byte{}, errUnsupportedAddressType
}

// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockDAG. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
	// The following fields are set when the instance is created and can't
	// be changed afterwards, so there is no need to protect them with a
	// separate mutex.
	db        database.DB
	dagParams *dagconfig.Params

	// The following fields are used to quickly link transactions and
	// addresses that have not been included into a block yet when an
	// address index is being maintained. They are protected by the
	// unconfirmedLock field.
	//
	// The txnsByAddr field is used to keep an index of all transactions
	// which either create an output to a given address or spend from a
	// previous output to it keyed by the address.
	//
	// The addrsByTx field is essentially the reverse and is used to
	// keep an index of all addresses which a given transaction involves.
	// This allows fairly efficient updates when transactions are removed
	// once they are included into a block.
	unconfirmedLock sync.RWMutex
	txnsByAddr      map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
	addrsByTx       map[daghash.TxID]map[[addrKeySize]byte]struct{}
}

// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)

// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
	return true
}

// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
	idx.db = db
	return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
	return addrIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
	return addrIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
	return err
}

// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int

// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
	// Nothing to index if the script is non-standard or otherwise doesn't
	// contain any addresses.
	_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
		idx.dagParams)
	if err != nil || addr == nil {
		return
	}

	addrKey, err := addrToKey(addr)
	if err != nil {
		// Ignore unsupported address types.
		return
	}

	// Avoid inserting the transaction more than once. Since the
	// transactions are indexed serially any duplicates will be
	// indexed in a row, so checking the most recent entry for the
	// address is enough to detect duplicates.
	indexedTxns := data[addrKey]
	numTxns := len(indexedTxns)
	if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
		return
	}
	indexedTxns = append(indexedTxns, txIdx)
	data[addrKey] = indexedTxns
}
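The duplicate check at the end of `indexScriptPubKey` only compares against the most recent entry; that suffices because transactions are indexed serially, so repeats of the same txIdx always arrive consecutively. A tiny sketch of the same rule:

```go
package main

import "fmt"

// appendUnique appends txIdx only when it differs from the most recent
// entry, mirroring the duplicate check above: duplicates arrive
// consecutively because transactions are indexed serially.
func appendUnique(indexed []int, txIdx int) []int {
	if n := len(indexed); n > 0 && indexed[n-1] == txIdx {
		return indexed // already recorded for this transaction
	}
	return append(indexed, txIdx)
}

func main() {
	var indexed []int
	// A transaction with three outputs to the same address shows up
	// three times in a row; only the first occurrence is kept.
	for _, txIdx := range []int{3, 3, 3, 5} {
		indexed = appendUnique(indexed, txIdx)
	}
	fmt.Println(indexed) // [3 5]
}
```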

// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
	for txIdx, tx := range block.Transactions() {
		// Coinbases do not reference any inputs. Since the block is
		// required to have already gone through full validation, it has
		// already been proven that the first transaction in the block is
		// a coinbase.
		if txIdx > util.CoinbaseTransactionIndex {
			for _, txIn := range tx.MsgTx().TxIn {
				// The UTXO should always have the input since
				// the index contract requires it, however, be
				// safe and simply ignore any missing entries.
				entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
				if !ok {
					continue
				}

				idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
		}
	}
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
	_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, dag)

	// Add all of the index entries for each address.
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockID, txLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be fewer
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
	addrKey, err := addrToKey(addr)
	if err != nil {
		return nil, 0, err
	}

	var regions []database.BlockRegion
	var skipped uint32
	err = idx.db.View(func(dbTx database.Tx) error {
		// Create closure to lookup the block hash given the ID using
		// the database transaction.
		fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
			// Deserialize and populate the result.
			return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
		}

		var err error
		addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
		regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
			addrKey, numToSkip, numRequested, reverse,
			fetchBlockHash)
		return err
	})

	return regions, skipped, err
}

// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
	// The error is ignored here since the only reason it can fail is if the
	// script fails to parse and it was already validated before being
	// admitted to the mempool.
	_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
		idx.dagParams)

	// Ignore unsupported address types.
	addrKey, err := addrToKey(addr)
	if err != nil {
		return
	}

	// Add a mapping from the address to the transaction.
	idx.unconfirmedLock.Lock()
	addrIndexEntry := idx.txnsByAddr[addrKey]
	if addrIndexEntry == nil {
		addrIndexEntry = make(map[daghash.TxID]*util.Tx)
		idx.txnsByAddr[addrKey] = addrIndexEntry
	}
	addrIndexEntry[*tx.ID()] = tx

	// Add a mapping from the transaction to the address.
	addrsByTxEntry := idx.addrsByTx[*tx.ID()]
	if addrsByTxEntry == nil {
		addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
		idx.addrsByTx[*tx.ID()] = addrsByTxEntry
	}
	addrsByTxEntry[addrKey] = struct{}{}
	idx.unconfirmedLock.Unlock()
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
	// Index addresses of all referenced previous transaction outputs.
	//
	// The existence checks are elided since this is only called after the
	// transaction has already been validated and thus all inputs are
	// already known to exist.
	for _, txIn := range tx.MsgTx().TxIn {
		entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
		if !ok {
			// Ignore missing entries. This should never happen
			// in practice since the function comments specifically
			// call out all inputs must be available.
			continue
		}
		idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
	}

	// Index addresses of all created outputs.
	for _, txOut := range tx.MsgTx().TxOut {
		idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
	}
}

// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
	idx.unconfirmedLock.Lock()
	defer idx.unconfirmedLock.Unlock()

	// Remove all address references to the transaction from the address
	// index and remove the entry for the address altogether if it no longer
	// references any transactions.
	for addrKey := range idx.addrsByTx[*txID] {
		delete(idx.txnsByAddr[addrKey], *txID)
		if len(idx.txnsByAddr[addrKey]) == 0 {
			delete(idx.txnsByAddr, addrKey)
		}
	}

	// Remove the entry from the transaction to address lookup map as well.
	delete(idx.addrsByTx, *txID)
}

// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
	// Ignore unsupported address types.
	addrKey, err := addrToKey(addr)
	if err != nil {
		return nil
	}

	// Protect concurrent access.
	idx.unconfirmedLock.RLock()
	defer idx.unconfirmedLock.RUnlock()

	// Return a new slice with the results if there are any. This ensures
	// safe concurrency.
	if txns, exists := idx.txnsByAddr[addrKey]; exists {
		addressTxns := make([]*util.Tx, 0, len(txns))
		for _, tx := range txns {
			addressTxns = append(addressTxns, tx)
		}
		return addressTxns
	}

	return nil
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
		" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}

// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockDAG to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockDAG package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
	return &AddrIndex{
		dagParams:  dagParams,
		txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
		addrsByTx:  make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
	}
}

// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}
blockdag/indexers/addrindex_test.go (new file, 277 lines)
@@ -0,0 +1,277 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/wire"
)

// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
	levels map[[levelKeySize]byte][]byte
}

// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
	levels := make(map[[levelKeySize]byte][]byte)
	for k, v := range b.levels {
		vCopy := make([]byte, len(v))
		copy(vCopy, v)
		levels[k] = vCopy
	}
	return &addrIndexBucket{levels: levels}
}

// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	return b.levels[levelKey]
}

// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	b.levels[levelKey] = value
	return nil
}

// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	delete(b.levels, levelKey)
	return nil
}

// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
	highestLevel := uint8(0)
	for k := range b.levels {
		if !bytes.Equal(k[:levelOffset], addrKey[:]) {
			continue
		}
		level := uint8(k[levelOffset])
		if level > highestLevel {
			highestLevel = level
		}
	}

	var levelBuf bytes.Buffer
	_, _ = levelBuf.WriteString("\n")
	maxEntries := level0MaxEntries
	for level := uint8(0); level <= highestLevel; level++ {
		data := b.levels[keyForLevel(addrKey, level)]
		numEntries := len(data) / txEntrySize
		for i := 0; i < numEntries; i++ {
			start := i * txEntrySize
			num := byteOrder.Uint32(data[start:])
			_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
		}
		for i := numEntries; i < maxEntries; i++ {
			_, _ = levelBuf.WriteString("_ ")
		}
		_, _ = levelBuf.WriteString("\n")
		maxEntries *= 2
	}

	return levelBuf.String()
}

// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
	// Find the highest level for the key.
	highestLevel := uint8(0)
	for k := range b.levels {
		if !bytes.Equal(k[:levelOffset], addrKey[:]) {
			continue
		}
		level := uint8(k[levelOffset])
		if level > highestLevel {
			highestLevel = level
		}
	}

	// Ensure the expected total number of entries are present and that
	// all levels adhere to the rules described in the address index
	// documentation.
	var totalEntries int
	maxEntries := level0MaxEntries
	for level := uint8(0); level <= highestLevel; level++ {
		// Level 0 can't have more entries than the max allowed if the
		// levels after it have data and it can't be empty. All other
		// levels must either be half full or full.
		data := b.levels[keyForLevel(addrKey, level)]
		numEntries := len(data) / txEntrySize
		totalEntries += numEntries
		if level == 0 {
			if (highestLevel != 0 && numEntries == 0) ||
				numEntries > maxEntries {

				return errors.Errorf("level %d has %d entries",
					level, numEntries)
			}
		} else if numEntries != maxEntries && numEntries != maxEntries/2 {
			return errors.Errorf("level %d has %d entries", level,
				numEntries)
		}
		maxEntries *= 2
	}
	if totalEntries != expectedTotal {
		return errors.Errorf("expected %d entries - got %d", expectedTotal,
			totalEntries)
	}

	// Ensure all of the numbers are in order starting from the highest
	// level moving to the lowest level.
	expectedNum := uint32(0)
	for level := highestLevel + 1; level > 0; level-- {
		data := b.levels[keyForLevel(addrKey, level)]
		numEntries := len(data) / txEntrySize
		for i := 0; i < numEntries; i++ {
			start := i * txEntrySize
			num := byteOrder.Uint32(data[start:])
			if num != expectedNum {
				return errors.Errorf("level %d offset %d does "+
					"not contain the expected number of "+
					"%d - got %d", level, i, num,
					expectedNum)
			}
			expectedNum++
		}
	}

	return nil
}
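// Illustrative note (not part of the original diff; the helper name is
// hypothetical): under the rules checked above, each level doubles the
// capacity of the previous one starting from level0MaxEntries, so the maximum
// number of entries at a given level can be sketched as:
//
//	func maxEntriesForLevel(level uint8) int {
//		return level0MaxEntries * (1 << level)
//	}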

// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name        string
		key         [addrKeySize]byte
		numInsert   int
		printLevels bool // Set to help debug a specific test.
	}{
		{
			name:      "level 0 not full",
			numInsert: level0MaxEntries - 1,
		},
		{
			name:      "level 1 half",
			numInsert: level0MaxEntries + 1,
		},
		{
			name:      "level 1 full",
			numInsert: level0MaxEntries*2 + 1,
		},
		{
			name:      "level 2 half, level 1 half",
			numInsert: level0MaxEntries*3 + 1,
		},
		{
			name:      "level 2 half, level 1 full",
			numInsert: level0MaxEntries*4 + 1,
		},
		{
			name:      "level 2 full, level 1 half",
			numInsert: level0MaxEntries*5 + 1,
		},
		{
			name:      "level 2 full, level 1 full",
			numInsert: level0MaxEntries*6 + 1,
		},
		{
			name:      "level 3 half, level 2 half, level 1 half",
			numInsert: level0MaxEntries*7 + 1,
		},
		{
			name:      "level 3 full, level 2 half, level 1 full",
			numInsert: level0MaxEntries*12 + 1,
		},
	}

nextTest:
	for testNum, test := range tests {
		// Insert entries in order.
		populatedBucket := &addrIndexBucket{
			levels: make(map[[levelKeySize]byte][]byte),
		}
		for i := 0; i < test.numInsert; i++ {
			txLoc := wire.TxLoc{TxStart: i * 2}
			err := dbPutAddrIndexEntry(populatedBucket, test.key,
				uint64(i), txLoc)
			if err != nil {
				t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
					"unexpected error: %v", testNum,
					test.name, err)
				continue nextTest
			}
		}
		if test.printLevels {
			t.Log(populatedBucket.printLevels(test.key))
		}

		// Delete entries from the populated bucket until all entries
		// have been deleted. The bucket is reset to the fully
		// populated bucket on each iteration so every combination is
		// tested. Notice the upper limit purposely exceeds the number
		// of entries to ensure attempting to delete more entries than
		// there are works correctly.
		for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
			// Clone populated bucket to run each delete against.
			bucket := populatedBucket.Clone()

			// Remove the number of entries for this iteration.
			err := dbRemoveAddrIndexEntries(bucket, test.key,
				numDelete)
			if err != nil {
				if numDelete <= test.numInsert {
					t.Errorf("dbRemoveAddrIndexEntries (%s) "+
						" delete %d - unexpected error: "+
						"%v", test.name, numDelete, err)
					continue nextTest
				}
			}
			if test.printLevels {
				t.Log(bucket.printLevels(test.key))
			}

			// Sanity check the levels to ensure they adhere to all
			// rules.
			numExpected := test.numInsert
			if numDelete <= test.numInsert {
				numExpected -= numDelete
			}
			err = bucket.sanityCheck(test.key, numExpected)
			if err != nil {
				t.Errorf("sanity check fail (%s) delete %d: %v",
					test.name, numDelete, err)
				continue nextTest
			}
		}
	}
}
blockdag/indexers/common.go (new file, 112 lines)
@@ -0,0 +1,112 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package indexers implements optional block DAG indexes.
*/
package indexers

import (
	"encoding/binary"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
)

var (
	// byteOrder is the preferred byte order used for serializing numeric
	// fields for storage in the database.
	byteOrder = binary.LittleEndian

	// errInterruptRequested indicates that an operation was cancelled due
	// to a user-requested interrupt.
	errInterruptRequested = errors.New("interrupt requested")
)

// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
	NeedsInputs() bool
}

// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
	// Key returns the key of the index as a byte slice.
	Key() []byte

	// Name returns the human-readable name of the index.
	Name() string

	// Create is invoked when the indexer manager determines the index needs
	// to be created for the first time.
	Create(dbTx database.Tx) error

	// Init is invoked when the index manager is first initializing the
	// index. This differs from the Create method in that it is called on
	// every load, including the case the index was just created.
	Init(db database.DB, dag *blockdag.BlockDAG) error

	// ConnectBlock is invoked when the index manager is notified that a new
	// block has been connected to the DAG.
	ConnectBlock(dbTx database.Tx,
		block *util.Block,
		blockID uint64,
		dag *blockdag.BlockDAG,
		acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
		virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error

	// Recover is invoked when the indexer wasn't turned on for several blocks
	// and the indexer needs to close the gaps.
	Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}

// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
	return "assertion failed: " + string(e)
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
	return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
	_, ok := err.(errDeserialize)
	return ok
}

// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
	Get(key []byte) []byte
	Put(key []byte, value []byte) error
	Delete(key []byte) error
}

// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
	select {
	case <-interrupted:
		return true
	default:
	}

	return false
}
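// Illustrative usage (not part of the original diff): the early-shutdown
// pattern the comment above describes, as used throughout this package.
//
//	if interruptRequested(interrupt) {
//		return errInterruptRequested
//	}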
@@ -1,28 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package indexers implements optional block DAG indexes.
*/
package indexers

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util/daghash"
)

// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
	// Init is invoked when the index manager is first initializing the
	// index.
	Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error

	// ConnectBlock is invoked when the index manager is notified that a new
	// block has been connected to the DAG.
	ConnectBlock(dbContext *dbaccess.TxContext,
		blockHash *daghash.Hash,
		acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
}
@@ -6,6 +6,8 @@ package indexers

import (
	"github.com/kaspanet/kaspad/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -6,25 +6,82 @@ package indexers

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
)

var (
	// indexTipsBucketName is the name of the db bucket used to house the
	// current tip of each index.
	indexTipsBucketName = []byte("idxtips")

	indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)

// Manager defines an index manager that manages multiple optional indexes and
// implements the blockdag.IndexManager interface so it can be seamlessly
// plugged into normal DAG processing.
type Manager struct {
	db             database.DB
	enabledIndexes []Indexer
}

// Ensure the Manager type implements the blockdag.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)

// Init initializes the enabled indexes.
// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
	for _, indexer := range m.enabledIndexes {
		if err := indexer.Init(dag, databaseContext); err != nil {

// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
	dropKey := make([]byte, len(idxKey)+1)
	dropKey[0] = 'd'
	copy(dropKey[1:], idxKey)
	return dropKey
}
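// Illustrative example (not part of the original diff; the key value below is
// a made-up placeholder): indexDropKey simply prepends the byte 'd' to the
// index key, so the in-progress drop marker never collides with the key
// itself.
//
//	idxKey := []byte("exampleidx")  // hypothetical index key
//	dropKey := indexDropKey(idxKey) // -> []byte("dexampleidx")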

// maybeFinishDrops determines if each of the enabled indexes are in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
	indexNeedsDrop := make([]bool, len(m.enabledIndexes))
	err := m.db.View(func(dbTx database.Tx) error {
		// None of the indexes needs to be dropped if the index tips
		// bucket hasn't been created yet.
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		if indexesBucket == nil {
			return nil
		}

		// Mark the indexer as requiring a drop if one is already in
		// progress.
		for i, indexer := range m.enabledIndexes {
			dropKey := indexDropKey(indexer.Key())
			if indexesBucket.Get(dropKey) != nil {
				indexNeedsDrop[i] = true
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	// Finish dropping any of the enabled indexes that are already in the
	// middle of being dropped.
	for i, indexer := range m.enabledIndexes {
		if !indexNeedsDrop[i] {
			continue
		}

		log.Infof("Resuming %s drop", indexer.Name())
		err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
		if err != nil {
			return err
		}
	}
@@ -32,18 +89,140 @@ func (m *Manager) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.Databas
	return nil
}

// maybeCreateIndexes determines if each of the enabled indexes have already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
	indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
	for _, indexer := range m.enabledIndexes {
		// Nothing to do if the index tip already exists.
		idxKey := indexer.Key()
		if indexesBucket.Get(idxKey) != nil {
			continue
		}

		// The tip for the index does not exist, so create it and
		// invoke the create callback for the index so it can perform
		// any one-time initialization it requires.
		if err := indexer.Create(dbTx); err != nil {
			return err
		}

		// TODO (Mike): this is a temporary solution to prevent the node
		// from not starting because it thinks indexers are not initialized.
		// Indexers, however, do not work properly, and a general solution
		// for their operation is required.
		if err := indexesBucket.Put(idxKey, []byte{0}); err != nil {
			return err
		}
	}

	return nil
}

// Init initializes the enabled indexes. This is called during DAG
// initialization and primarily consists of catching up all indexes to the
// current tips. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
	// Nothing to do when no indexes are enabled.
	if len(m.enabledIndexes) == 0 {
		return nil
	}

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	m.db = db

	// Finish any drops that were previously interrupted.
	if err := m.maybeFinishDrops(interrupt); err != nil {
		return err
	}

	// Create the initial state for the indexes as needed.
	err := m.db.Update(func(dbTx database.Tx) error {
		// Create the bucket for the current tips as needed.
		meta := dbTx.Metadata()
		_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
		if err != nil {
			return err
		}
		if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
			return err
		}

		return m.maybeCreateIndexes(dbTx)
	})
	if err != nil {
		return err
	}

	// Initialize each of the enabled indexes.
	for _, indexer := range m.enabledIndexes {
		if err := indexer.Init(db, blockDAG); err != nil {
			return err
		}
	}

	return m.recoverIfNeeded()
}

// recoverIfNeeded checks if the node worked for some time
// without one of the currently enabled indexes, and if that's
// the case, recovers the missing blocks for the index.
func (m *Manager) recoverIfNeeded() error {
	return m.db.Update(func(dbTx database.Tx) error {
		lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
		for _, indexer := range m.enabledIndexes {
			serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
			currentIdxBlockID := uint64(0)
			if serializedCurrentIdxBlockID != nil {
				currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
			}
			if lastKnownBlockID > currentIdxBlockID {
				err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
				if err != nil {
					return err
				}
			}
		}
		return nil
	})
}

// ConnectBlock must be invoked when a block is added to the DAG. It
// keeps track of the state of each index it is managing, performs some sanity
// checks, and invokes each indexer.
//
// This is part of the blockdag.IndexManager interface.
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

	// Call each of the currently active optional indexes with the block
	// being connected so they can update accordingly.
	for _, index := range m.enabledIndexes {
		// Notify the indexer with the connected block so it can index it.
		if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
		if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
			return err
		}
	}

	// Add the new block ID index entry for the block being connected and
	// update the current internal block ID accordingly.
	err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
	if err != nil {
		return err
	}
	return nil
}

func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
	serializedBlockID := blockdag.SerializeBlockID(blockID)
	for _, index := range m.enabledIndexes {
		err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
		if err != nil {
			return err
		}
	}
@@ -59,3 +238,152 @@ func NewManager(enabledIndexes []Indexer) *Manager {
		enabledIndexes: enabledIndexes,
	}
}

// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed, if it is stopped before it is done, before the
// index can be used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
	// Nothing to do if the index doesn't already exist.
	var needsDelete bool
	err := db.View(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
			needsDelete = true
		}
		return nil
	})
	if err != nil {
		return err
	}
	if !needsDelete {
		log.Infof("Not dropping %s because it does not exist", idxName)
		return nil
	}

	// Mark that the index is in the process of being dropped so that it
	// can be resumed on the next start if interrupted before the process is
	// complete.
	log.Infof("Dropping all %s entries. This might take a while...",
		idxName)
	err = db.Update(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		return indexesBucket.Put(indexDropKey(idxKey), idxKey)
	})
	if err != nil {
		return err
	}

	// Since the indexes can be so large, attempting to simply delete
	// the bucket in a single database transaction would result in massive
	// memory usage and likely crash many systems due to ulimits. In order
	// to avoid this, use a cursor to delete a maximum number of entries out
	// of the bucket at a time. Recurse buckets depth-first to delete any
	// sub-buckets.
	const maxDeletions = 2000000
	var totalDeleted uint64

	// Recurse through all buckets in the index, cataloging each for
	// later deletion.
	var subBuckets [][][]byte
	var subBucketClosure func(database.Tx, []byte, [][]byte) error
	subBucketClosure = func(dbTx database.Tx,
		subBucket []byte, tlBucket [][]byte) error {
		// Get full bucket name and append to subBuckets for later
		// deletion.
		var bucketName [][]byte
		if len(tlBucket) == 0 {
			bucketName = append(bucketName, subBucket)
		} else {
			bucketName = append(tlBucket, subBucket)
		}
		subBuckets = append(subBuckets, bucketName)
		// Recurse sub-buckets to append to subBuckets slice.
		bucket := dbTx.Metadata()
		for _, subBucketName := range bucketName {
			bucket = bucket.Bucket(subBucketName)
		}
		return bucket.ForEachBucket(func(k []byte) error {
			return subBucketClosure(dbTx, k, bucketName)
		})
	}

	// Call subBucketClosure with top-level bucket.
	err = db.View(func(dbTx database.Tx) error {
		return subBucketClosure(dbTx, idxKey, nil)
	})
	if err != nil {
		return err
	}

	// Iterate through each sub-bucket in reverse, deepest-first, deleting
	// all keys inside them and then dropping the buckets themselves.
	for i := range subBuckets {
		bucketName := subBuckets[len(subBuckets)-1-i]
		// Delete maxDeletions key/value pairs at a time.
		for numDeleted := maxDeletions; numDeleted == maxDeletions; {
			numDeleted = 0
			err := db.Update(func(dbTx database.Tx) error {
				subBucket := dbTx.Metadata()
				for _, subBucketName := range bucketName {
					subBucket = subBucket.Bucket(subBucketName)
				}
				cursor := subBucket.Cursor()
				for ok := cursor.First(); ok; ok = cursor.Next() &&
					numDeleted < maxDeletions {

					if err := cursor.Delete(); err != nil {
						return err
					}
					numDeleted++
				}
				return nil
			})
			if err != nil {
				return err
			}

			if numDeleted > 0 {
				totalDeleted += uint64(numDeleted)
				log.Infof("Deleted %d keys (%d total) from %s",
					numDeleted, totalDeleted, idxName)
			}
		}

		if interruptRequested(interrupt) {
			return errInterruptRequested
		}

		// Drop the bucket itself.
		err = db.Update(func(dbTx database.Tx) error {
			bucket := dbTx.Metadata()
			for j := 0; j < len(bucketName)-1; j++ {
				bucket = bucket.Bucket(bucketName[j])
			}
			return bucket.DeleteBucket(bucketName[len(bucketName)-1])
		})
		if err != nil {
			return err
		}
	}

	// Remove the index tip, index bucket, and in-progress drop flag now
	// that all index entries have been removed.
	err = db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()
		indexesBucket := meta.Bucket(indexTipsBucketName)
		if err := indexesBucket.Delete(idxKey); err != nil {
			return err
		}

		if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
			return err
		}

		return indexesBucket.Delete(indexDropKey(idxKey))
	})
	if err != nil {
		return err
	}

	log.Infof("Dropped %s", idxName)
	return nil
}
blockdag/indexers/txindex.go (new file, 438 lines)
@@ -0,0 +1,438 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"fmt"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
)

const (
	// txIndexName is the human-readable name for the index.
	txIndexName = "transaction index"

	includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)

var (
	includingBlocksIndexKey = []byte("includingblocksidx")

	acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)

// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual.
var txsAcceptedByVirtual map[daghash.TxID]bool

// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second
// bucket contains, for each block that considers the transaction accepted
// (i.e. the transaction is found in its blue set without double spends), the
// blue block (or the block itself) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen per the probability and even if it did, the old
// one must be fully spent and so the most likely transaction a caller would
// want for a given hash is the most recent one anyway.
//
// The including blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <block id> = <start offset><tx length>
//
//   Field          Type    Size
//   block id       uint64  8 bytes
//   start offset   uint32  4 bytes
//   tx length      uint32  4 bytes
//   -----
//   Total: 16 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <accepting block id> = <including block id>
//
//   Field               Type    Size
//   accepting block id  uint64  8 bytes
//   including block id  uint64  8 bytes
//   -----
//   Total: 16 bytes
//
// -----------------------------------------------------------------------------

func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
	byteOrder.PutUint32(target, uint32(txLoc.TxStart))
	byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
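// Illustrative counterpart (not part of the original diff; the helper name
// readIncludingBlocksEntry is hypothetical): decoding the 8-byte value that
// putIncludingBlocksEntry writes under a <block id> key.
//
//	func readIncludingBlocksEntry(serialized []byte) (txStart, txLen uint32) {
//		txStart = byteOrder.Uint32(serialized[:4])
//		txLen = byteOrder.Uint32(serialized[4:8])
//		return txStart, txLen
//	}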

func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function
// arbitrarily returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
	// Load the record from the database and return now if it doesn't exist.
	txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
	if txBucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	cursor := txBucket.Cursor()
	if ok := cursor.First(); !ok {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	serializedBlockID := cursor.Key()
	serializedData := cursor.Value()
	if len(serializedData) == 0 {
		return nil, nil
	}

	// Ensure the serialized data has enough bytes to properly deserialize.
	if len(serializedData) < includingBlocksIndexKeyEntrySize {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s", txID),
		}
	}

	// Load the block hash associated with the block ID.
	hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
	if err != nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s: %s", txID, err),
		}
	}

	// Deserialize the final entry.
	region := database.BlockRegion{Hash: &daghash.Hash{}}
	copy(region.Hash[:], hash[:])
	region.Offset = byteOrder.Uint32(serializedData[:4])
	region.Len = byteOrder.Uint32(serializedData[4:])

	return &region, nil
}

// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice. Then, pass the appropriate
	// subslice to the database to be written. This approach significantly
	// cuts down on the number of required allocations.
	includingBlocksOffset := 0
	serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
	for i, tx := range block.Transactions() {
		putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
		endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
		err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
			serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
		if err != nil {
			return err
		}
		includingBlocksOffset += includingBlocksIndexKeyEntrySize
	}

	for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
		var includingBlockID uint64
		if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
			includingBlockID = blockID
		} else {
			includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
			if err != nil {
				return err
			}
		}

		serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)

		for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
			if !txAcceptanceData.IsAccepted {
				continue
			}
			err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// Initialize a new txsAcceptedByVirtual
	entries := 0
	for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
		entries += len(blockTxsAcceptanceData.TxAcceptanceData)
	}
	txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)

	// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
	for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
		for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
			txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
		}
	}

	return nil
}

// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
	db database.DB
}

// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)

// Init initializes the hash-based transaction index. In particular, it finds
// the highest used block ID and stores it for later use when connecting or
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
	idx.db = db

	// Initialize the txsAcceptedByVirtual index
	virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
	if err != nil {
		return err
	}
	err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
	if err != nil {
		return err
	}
	return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
	return includingBlocksIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
	return txIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
		return err
	}
	_, err := meta.CreateBucket(acceptingBlocksIndexKey)
	return err
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
		return err
	}

	err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
	if err != nil {
		return err
	}
	return nil
}

// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
	var region *database.BlockRegion
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		region, err = dbFetchFirstTxRegion(dbTx, txID)
		return err
	})
	return region, err
}

// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
		if err != nil {
			return err
		}
		return nil
	})
	return blockHashes, err
}

func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No including blocks "+
				"were found for %s", txHash),
		}
	}
	err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
		blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
		if err != nil {
			return err
		}
		blockHashes = append(blockHashes, blockHash)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}

// BlockThatAcceptedTx returns the hash of the block where the transaction got
// accepted (from the virtual block's point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
	var acceptingBlock *daghash.Hash
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
		return err
	})
	return acceptingBlock, err
}

func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
	// If the transaction was accepted by the current virtual,
	// return the zeroHash immediately
	if _, ok := txsAcceptedByVirtual[*txID]; ok {
		return &daghash.ZeroHash, nil
	}

	bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
	if bucket == nil {
		return nil, nil
	}
	cursor := bucket.Cursor()
	if !cursor.First() {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("Accepting blocks bucket is "+
				"empty for %s", txID),
		}
	}
	for ; cursor.Key() != nil; cursor.Next() {
		blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
		if err != nil {
			return nil, err
		}
		isBlockInSelectedParentChain, err := dag.IsInSelectedParentChain(blockHash)
		if err != nil {
			return nil, err
		}
		if isBlockInSelectedParentChain {
			return blockHash, nil
		}
	}
	return nil, nil
}

// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockDAG to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewTxIndex() *TxIndex {
	return &TxIndex{}
}

// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
	err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
	if err != nil {
		return err
	}

	err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
	if err != nil {
		return err
	}

	return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
		" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}
blockdag/indexers/txindex_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
package indexers

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/mining"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
	signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
	if err != nil {
		t.Fatalf("Error creating signature script: %s", err)
	}
	txIn := &wire.TxIn{
		PreviousOutpoint: wire.Outpoint{
			TxID:  *originTx.TxID(),
			Index: outputIndex,
		},
		Sequence:        wire.MaxTxInSequenceNum,
		SignatureScript: signatureScript,
	}
	txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
	tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

	return tx
}

func TestTxIndexConnectBlock(t *testing.T) {
	blocks := make(map[daghash.Hash]*util.Block)

	txIndex := NewTxIndex()
	indexManager := NewManager([]Indexer{txIndex})

	params := dagconfig.SimnetParams
	params.BlockCoinbaseMaturity = 0
	params.K = 1

	config := blockdag.Config{
		IndexManager: indexManager,
		DAGParams:    &params,
	}

	dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
		block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
		}
		utilBlock := util.NewBlock(block)
		blocks[*block.BlockHash()] = utilBlock
		isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
		}
		if isDelayed {
			t.Fatalf("TestTxIndexConnectBlock: block %s "+
				"is too far in the future", blockName)
		}
		if isOrphan {
			t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
		}
		return block
	}

	block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
	block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
	block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
	block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
	block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")

	block2TxID := block2Tx.TxID()
	block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	block3Hash := block3.BlockHash()
	if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
		t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
	}

	block3TxID := block3Tx.TxID()
	block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
		t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
			"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
	}

	block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
	block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
	prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")

	block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}

	if !block2TxAcceptedBlock.IsEqual(block3Hash) {
		t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxAcceptedBlock)
	}

	region, err := txIndex.TxFirstBlockRegion(block3TxID)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
	}
	regionBlock, ok := blocks[*region.Hash]
	if !ok {
		t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
	}

	regionBlockBytes, err := regionBlock.Bytes()
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
	}
	block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]

	block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
	if err := block3Tx.KaspaEncode(block3TxBuf, 0); err != nil {
		t.Fatalf("TestTxIndexConnectBlock: KaspaEncode: %v", err)
	}
	blockTxBytes := block3TxBuf.Bytes()

	if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
		t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
	}
}
|
||||
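As the assertions above show, `BlockThatAcceptedTx` returns the hash of the block that accepted a transaction, with `daghash.ZeroHash` standing in for "accepted only by the virtual block so far". A minimal usage sketch of the two states (the surrounding `dag`, `txIndex`, and `txID` variables are assumed to be in scope):

```go
acceptingBlockHash, err := txIndex.BlockThatAcceptedTx(dag, txID)
if err != nil {
    return err
}
if acceptingBlockHash.IsEqual(&daghash.ZeroHash) {
    // Not yet accepted by any concrete block; only the virtual
    // block currently accepts this transaction.
}
```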
blockdag/mediantime.go (new file, 206 lines)
@@ -0,0 +1,206 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "math"
    "sort"
    "sync"
    "time"
)

const (
    // maxAllowedOffsetSecs is the maximum number of seconds in either
    // direction that the local clock will be adjusted. When the median time
    // of the network is outside of this range, no offset will be applied.
    maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes

    // similarTimeSecs is the number of seconds in either direction from the
    // local clock that is used to determine that it is likely wrong and
    // hence to show a warning.
    similarTimeSecs = 5 * 60 // 5 minutes
)

var (
    // maxMedianTimeEntries is the maximum number of entries allowed in the
    // median time data. This is a variable as opposed to a constant so the
    // test code can modify it.
    maxMedianTimeEntries = 200
)

// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
    // AdjustedTime returns the current time adjusted by the median time
    // offset as calculated from the time samples added by AddTimeSample.
    AdjustedTime() time.Time

    // AddTimeSample adds a time sample that is used when determining the
    // median time of the added samples.
    AddTimeSample(id string, timeVal time.Time)

    // Offset returns the number of seconds to adjust the local clock based
    // upon the median of the time samples added by AddTimeSample.
    Offset() time.Duration
}

// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64

// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
    return len(s)
}

// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
    return s[i] < s[j]
}

// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
    mtx                sync.Mutex
    knownIDs           map[string]struct{}
    offsets            []int64
    offsetSecs         int64
    invalidTimeChecked bool
}

// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)

// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    // Limit the adjusted time to 1 second precision.
    now := time.Unix(time.Now().Unix(), 0)
    return now.Add(time.Duration(m.offsetSecs) * time.Second)
}

// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    // Don't add time data from the same source.
    if _, exists := m.knownIDs[sourceID]; exists {
        return
    }
    m.knownIDs[sourceID] = struct{}{}

    // Truncate the provided offset to seconds and append it to the slice
    // of offsets while respecting the maximum number of allowed entries by
    // replacing the oldest entry with the new entry once the maximum number
    // of entries is reached.
    now := time.Unix(time.Now().Unix(), 0)
    offsetSecs := int64(timeVal.Sub(now).Seconds())
    numOffsets := len(m.offsets)
    if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
        m.offsets = m.offsets[1:]
        numOffsets--
    }
    m.offsets = append(m.offsets, offsetSecs)
    numOffsets++

    // Sort the offsets so the median can be obtained as needed later.
    sortedOffsets := make([]int64, numOffsets)
    copy(sortedOffsets, m.offsets)
    sort.Sort(int64Sorter(sortedOffsets))

    offsetDuration := time.Duration(offsetSecs) * time.Second
    log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
        numOffsets)

    // The median offset is only updated when there are enough offsets and
    // the number of offsets is odd so the middle value is the true median.
    // Thus, there is nothing to do when those conditions are not met.
    if numOffsets < 5 || numOffsets&0x01 != 1 {
        return
    }

    // At this point the number of offsets in the list is odd, so the
    // middle value of the sorted offsets is the median.
    median := sortedOffsets[numOffsets/2]

    // Set the new offset when the median offset is within the allowed
    // offset range.
    if math.Abs(float64(median)) < maxAllowedOffsetSecs {
        m.offsetSecs = median
    } else {
        // The median offset of all added time data is larger than the
        // maximum allowed offset, so don't use an offset. This
        // effectively limits how far the local clock can be skewed.
        m.offsetSecs = 0

        if !m.invalidTimeChecked {
            m.invalidTimeChecked = true

            // Find if any time samples have a time that is close
            // to the local time.
            var remoteHasCloseTime bool
            for _, offset := range sortedOffsets {
                if math.Abs(float64(offset)) < similarTimeSecs {
                    remoteHasCloseTime = true
                    break
                }
            }

            // Warn if none of the time samples are close.
            if !remoteHasCloseTime {
                log.Warnf("Please check your date and time " +
                    "are correct! kaspad will not work " +
                    "properly with an invalid time")
            }
        }
    }

    medianDuration := time.Duration(m.offsetSecs) * time.Second
    log.Debugf("New time offset: %s", medianDuration)
}

// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    return time.Duration(m.offsetSecs) * time.Second
}

// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
    return &medianTime{
        knownIDs: make(map[string]struct{}),
        offsets:  make([]int64, 0, maxMedianTimeEntries),
    }
}
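The `MedianTimeSource` interface above is self-contained; a minimal usage sketch (peer IDs and sample offsets here are illustrative, and a `fmt` import is assumed):

```go
timeSource := NewMedianTime()

// One sample per peer; a repeated source ID is ignored.
timeSource.AddTimeSample("peer-1", time.Now().Add(3*time.Second))
timeSource.AddTimeSample("peer-2", time.Now().Add(-2*time.Second))

// The median offset only updates once at least five samples have been
// added and the count is odd; until then Offset() is zero and
// AdjustedTime() tracks the local clock.
fmt.Printf("offset=%s adjusted=%s\n", timeSource.Offset(), timeSource.AdjustedTime())
```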
blockdag/mediantime_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "strconv"
    "testing"
    "time"
)

// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
    tests := []struct {
        in         []int64
        wantOffset int64
        useDupID   bool
    }{
        // Not enough samples must result in an offset of 0.
        {in: []int64{1}, wantOffset: 0},
        {in: []int64{1, 2}, wantOffset: 0},
        {in: []int64{1, 2, 3}, wantOffset: 0},
        {in: []int64{1, 2, 3, 4}, wantOffset: 0},

        // Various numbers of entries. The expected offset is only
        // updated on an odd number of elements.
        {in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
        {in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
        {in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
        {in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
        {in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
        {in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
        {in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},

        // The offset stops being updated once the max number of entries
        // has been reached.
        {in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
        {in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
        {in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},

        // Offsets that are too far away from the local time should
        // be ignored.
        {in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},

        // Exercise the condition where the median offset is greater
        // than the max allowed adjustment, but there is at least one
        // sample that is close enough to the current time to avoid
        // triggering a warning about an invalid local clock.
        {in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
    }

    // Modify the max number of allowed median time entries for these tests.
    maxMedianTimeEntries = 10
    defer func() { maxMedianTimeEntries = 200 }()

    for i, test := range tests {
        filter := NewMedianTime()
        for j, offset := range test.in {
            id := strconv.Itoa(j)
            now := time.Unix(time.Now().Unix(), 0)
            tOffset := now.Add(time.Duration(offset) * time.Second)
            filter.AddTimeSample(id, tOffset)

            // Ensure that duplicate IDs are ignored.
            if test.useDupID {
                // Modify the offsets to ensure the final median
                // would be different if the duplicate were added.
                tOffset = tOffset.Add(time.Duration(offset) *
                    time.Second)
                filter.AddTimeSample(id, tOffset)
            }
        }

        // Since it is possible that the time.Now call in AddTimeSample
        // and the time.Now calls here in the tests will be off by one
        // second, allow a fudge factor to compensate.
        gotOffset := filter.Offset()
        wantOffset := time.Duration(test.wantOffset) * time.Second
        wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
        if gotOffset != wantOffset && gotOffset != wantOffset2 {
            t.Errorf("Offset #%d: unexpected offset -- got %v, "+
                "want %v or %v", i, gotOffset, wantOffset,
                wantOffset2)
            continue
        }

        // Since it is possible that the time.Now call in AdjustedTime
        // and the time.Now call here in the tests will be off by one
        // second, allow a fudge factor to compensate.
        adjustedTime := filter.AdjustedTime()
        now := time.Unix(time.Now().Unix(), 0)
        wantTime := now.Add(filter.Offset())
        wantTime2 := now.Add(filter.Offset() - time.Second)
        if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
            t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
                "want %v or %v", i, adjustedTime, wantTime,
                wantTime2)
            continue
        }
    }
}
@@ -3,22 +3,18 @@ package blockdag

import (
    "bytes"
    "encoding/binary"
    "time"

    "github.com/kaspanet/go-secp256k1"
    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/txscript"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/mstime"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "github.com/kaspanet/kaspad/wire"
    "sort"
    "time"
)

// BlockForMining returns a block with the given transactions
// that points to the current DAG tips, that is valid from
// all aspects except proof of work.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*domainmessage.MsgBlock, error) {
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
    blockTimestamp := dag.NextBlockTime()
    requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)

@@ -29,28 +25,40 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*domainmessage.Msg
        return nil, err
    }

    // Sort transactions by subnetwork ID before building Merkle tree
    sort.Slice(transactions, func(i, j int) bool {
        if transactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
            return true
        }
        if transactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
            return false
        }
        return subnetworkid.Less(&transactions[i].MsgTx().SubnetworkID, &transactions[j].MsgTx().SubnetworkID)
    })

    // Create a new block ready to be solved.
    hashMerkleTree := BuildHashMerkleTreeStore(transactions)
    acceptedIDMerkleRoot, err := dag.NextAcceptedIDMerkleRootNoLock()
    if err != nil {
        return nil, err
    }
    var msgBlock domainmessage.MsgBlock
    var msgBlock wire.MsgBlock
    for _, tx := range transactions {
        msgBlock.AddTransaction(tx.MsgTx())
    }

    multiset, err := dag.NextBlockMultiset()
    utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
    if err != nil {
        return nil, err
    }
    utxoCommitment := utxoWithTransactions.Multiset().Hash()

    msgBlock.Header = domainmessage.BlockHeader{
    msgBlock.Header = wire.BlockHeader{
        Version:              nextBlockVersion,
        ParentHashes:         dag.TipHashes(),
        HashMerkleRoot:       hashMerkleTree.Root(),
        AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
        UTXOCommitment:       (*daghash.Hash)(multiset.Finalize()),
        UTXOCommitment:       utxoCommitment,
        Timestamp:            blockTimestamp,
        Bits:                 requiredDifficulty,
    }
@@ -58,19 +66,6 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*domainmessage.Msg
    return &msgBlock, nil
}

// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
    _, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
    if err != nil {
        return nil, err
    }

    return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
}

// CoinbasePayloadExtraData returns coinbase payload extra data parameter
// which is built from extra nonce and coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
@@ -106,20 +101,20 @@ func (dag *BlockDAG) NextCoinbaseFromAddress(payToAddress util.Address, extraDat
// on the end of the DAG. In particular, it is one second after
// the median timestamp of the last several blocks per the DAG consensus
// rules.
func (dag *BlockDAG) NextBlockMinimumTime() mstime.Time {
    return dag.CalcPastMedianTime().Add(time.Millisecond)
func (dag *BlockDAG) NextBlockMinimumTime() time.Time {
    return dag.CalcPastMedianTime().Add(time.Second)
}

// NextBlockTime returns a valid block time for the
// next block that will point to the existing DAG tips.
func (dag *BlockDAG) NextBlockTime() mstime.Time {
func (dag *BlockDAG) NextBlockTime() time.Time {
    // The timestamp for the block must not be before the median timestamp
    // of the last several blocks. Thus, choose the maximum between the
    // current time and one second after the past median time. The current
    // timestamp is truncated to a millisecond boundary before comparison since a
    // timestamp is truncated to a second boundary before comparison since a
    // block timestamp does not support a precision greater than one
    // millisecond.
    newTimestamp := dag.Now()
    // second.
    newTimestamp := dag.AdjustedTime()
    minTimestamp := dag.NextBlockMinimumTime()
    if newTimestamp.Before(minTimestamp) {
        newTimestamp = minTimestamp
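Both sides of the hunk above implement the same timestamp floor, differing only in the time unit (millisecond vs. second). A hedged sketch of the rule; the helper name is ours, not the repo's:

```go
// nextBlockTimestamp picks the later of the (adjusted) wall clock and
// one tick past the median time of recent blocks, mirroring
// NextBlockTime/NextBlockMinimumTime above.
func nextBlockTimestamp(now, pastMedianTime time.Time, tick time.Duration) time.Time {
    minTimestamp := pastMedianTime.Add(tick)
    if now.Before(minTimestamp) {
        return minTimestamp
    }
    return now
}
```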
@@ -1,29 +0,0 @@
package blockdag

import (
    "encoding/binary"
    "github.com/kaspanet/go-secp256k1"
    "io"
)

const multisetPointSize = 32

// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
    serialized := ms.Serialize()
    err := binary.Write(w, byteOrder, serialized)
    if err != nil {
        return err
    }
    return nil
}

// deserializeMultiset deserializes an ECMH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
    serialized := &secp256k1.SerializedMultiSet{}
    err := binary.Read(r, byteOrder, serialized[:])
    if err != nil {
        return nil, err
    }
    return secp256k1.DeserializeMultiSet(serialized)
}
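A round trip through the two helpers above would look roughly like this (a sketch assuming the package-level `byteOrder` they reference and an `ms *secp256k1.MultiSet` in scope):

```go
var buf bytes.Buffer
if err := serializeMultiset(&buf, ms); err != nil {
    return err
}
// deserializeMultiset reads back the same fixed-size serialized point.
roundTripped, err := deserializeMultiset(&buf)
if err != nil {
    return err
}
_ = roundTripped // encodes the same ECMH point as ms
```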
@@ -1,131 +0,0 @@
package blockdag

import (
    "bytes"
    "github.com/kaspanet/go-secp256k1"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/locks"
    "github.com/pkg/errors"
)

type multisetStore struct {
    dag    *BlockDAG
    new    map[daghash.Hash]struct{}
    loaded map[daghash.Hash]secp256k1.MultiSet
    mtx    *locks.PriorityMutex
}

func newMultisetStore(dag *BlockDAG) *multisetStore {
    return &multisetStore{
        dag:    dag,
        new:    make(map[daghash.Hash]struct{}),
        loaded: make(map[daghash.Hash]secp256k1.MultiSet),
    }
}

func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
    store.loaded[*node.hash] = *ms
    store.addToNewBlocks(node.hash)
}

func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
    store.new[*blockHash] = struct{}{}
}

func multisetNotFoundError(blockHash *daghash.Hash) error {
    return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
}

func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
    ms, exists := store.multisetByBlockHash(node.hash)
    if !exists {
        return nil, multisetNotFoundError(node.hash)
    }
    return ms, nil
}

func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
    ms, ok := store.loaded[*hash]
    return &ms, ok
}

// flushToDB writes all new multiset data to the database.
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
    if len(store.new) == 0 {
        return nil
    }

    w := &bytes.Buffer{}
    for hash := range store.new {
        hash := hash // Copy hash to a new variable to avoid passing the same pointer

        w.Reset()
        ms, exists := store.loaded[hash]
        if !exists {
            return multisetNotFoundError(&hash)
        }

        err := serializeMultiset(w, &ms)
        if err != nil {
            return err
        }

        err = store.storeMultiset(dbContext, &hash, w.Bytes())
        if err != nil {
            return err
        }
    }
    return nil
}

func (store *multisetStore) clearNewEntries() {
    store.new = make(map[daghash.Hash]struct{})
}

func (store *multisetStore) init(dbContext dbaccess.Context) error {
    cursor, err := dbaccess.MultisetCursor(dbContext)
    if err != nil {
        return err
    }
    defer cursor.Close()

    for ok := cursor.First(); ok; ok = cursor.Next() {
        key, err := cursor.Key()
        if err != nil {
            return err
        }

        hash, err := daghash.NewHash(key.Suffix())
        if err != nil {
            return err
        }

        serializedMS, err := cursor.Value()
        if err != nil {
            return err
        }

        ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
        if err != nil {
            return err
        }

        store.loaded[*hash] = *ms
    }
    return nil
}

// storeMultiset stores the multiset data to the database.
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
    exists, err := dbaccess.HasMultiset(dbContext, blockHash)
    if err != nil {
        return err
    }

    if exists {
        return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
    }

    return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
}
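The store above follows a simple dirty-set pattern; an illustrative write path using the functions just shown (the `dbTx` transaction context is assumed):

```go
store := newMultisetStore(dag)
store.setMultiset(node, ms) // cache in memory and mark the block as "new"

// On flush, every hash in store.new is serialized and written once;
// storeMultiset refuses to overwrite an existing entry.
if err := store.flushToDB(dbTx); err != nil {
    return err
}
store.clearNewEntries() // reset the dirty set after the commit
```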
@@ -57,8 +57,8 @@ type Notification struct {
// NotificationType for details on the types and contents of notifications.
func (dag *BlockDAG) Subscribe(callback NotificationCallback) {
    dag.notificationsLock.Lock()
    defer dag.notificationsLock.Unlock()
    dag.notifications = append(dag.notifications, callback)
    dag.notificationsLock.Unlock()
}

// sendNotification sends a notification with the passed type and data if the
@@ -68,10 +68,10 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
    // Generate and send the notification.
    n := Notification{Type: typ, Data: data}
    dag.notificationsLock.RLock()
    defer dag.notificationsLock.RUnlock()
    for _, callback := range dag.notifications {
        callback(&n)
    }
    dag.notificationsLock.RUnlock()
}

// BlockAddedNotificationData defines data to be sent along with a BlockAdded
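Note that one side of each hunk unlocks via `defer`, which stays correct even if the callback or append panics. For context, a subscriber registers a callback that is invoked synchronously for every notification; this sketch assumes the `NTBlockAdded` notification type and `Block` field that `BlockAddedNotificationData` implies:

```go
dag.Subscribe(func(notification *blockdag.Notification) {
    // Callbacks run inline under the notifications lock's read side,
    // so keep them short and non-blocking.
    if notification.Type == blockdag.NTBlockAdded {
        data := notification.Data.(*blockdag.BlockAddedNotificationData)
        log.Infof("block added: %s", data.Block.Hash())
    }
})
```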
@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
    }

    // Create a new database and dag instance to run tests against.
    dag, teardownFunc, err := DAGSetup("notifications", true, Config{
    dag, teardownFunc, err := DAGSetup("notifications", Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
@@ -6,10 +6,8 @@ package blockdag

import (
    "fmt"
    "time"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/pkg/errors"
    "time"

    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
@@ -46,23 +44,15 @@ const (
    // in the block index but was never fully processed
    BFWasStored

    // BFDisallowDelay is set to indicate that a delayed block should be rejected.
    // This is used for the case where a block is submitted through RPC.
    BFDisallowDelay

    // BFDisallowOrphans is set to indicate that an orphan block should be rejected.
    // This is used for the case where a block is submitted through RPC.
    BFDisallowOrphans

    // BFNone is a convenience value to specifically indicate no flags.
    BFNone BehaviorFlags = 0
)

// IsInDAG determines whether a block with the given hash exists in
// BlockExists determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsInDAG(hash *daghash.Hash) bool {
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
    return dag.index.HaveBlock(hash)
}

@@ -105,8 +95,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
    // still missing.
    _, err := lookupParentNodes(orphan.block, dag)
    if err != nil {
        var ruleErr RuleError
        if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
        if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
            continue
        }
        return err
@@ -122,7 +111,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
    if err != nil {
        // Since we don't want to reject the original block because of
        // a bad unorphaned child, only return an error if it's not a RuleError.
        if !errors.As(err, &RuleError{}) {
        if _, ok := err.(RuleError); !ok {
            return err
        }
        log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
@@ -155,14 +144,12 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrp
func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) (isOrphan bool, isDelayed bool, err error) {
    isAfterDelay := flags&BFAfterDelay == BFAfterDelay
    wasBlockStored := flags&BFWasStored == BFWasStored
    disallowDelay := flags&BFDisallowDelay == BFDisallowDelay
    disallowOrphans := flags&BFDisallowOrphans == BFDisallowOrphans

    blockHash := block.Hash()
    log.Tracef("Processing block %s", blockHash)

    // The block must not already exist in the DAG.
    if dag.IsInDAG(blockHash) && !wasBlockStored {
    if dag.BlockExists(blockHash) && !wasBlockStored {
        str := fmt.Sprintf("already have block %s", blockHash)
        return false, false, ruleError(ErrDuplicateBlock, str)
    }
@@ -185,11 +172,6 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
        return false, false, err
    }

    if delay != 0 && disallowDelay {
        str := fmt.Sprintf("Cannot process blocks beyond the allowed time offset while the BFDisallowDelay flag is raised %s", blockHash)
        return false, true, ruleError(ErrDelayedBlockIsNotAllowed, str)
    }

    if delay != 0 {
        err = dag.addDelayedBlock(block, delay)
        if err != nil {
@@ -201,20 +183,16 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)

    var missingParents []*daghash.Hash
    for _, parentHash := range block.MsgBlock().Header.ParentHashes {
        if !dag.IsInDAG(parentHash) {
        if !dag.BlockExists(parentHash) {
            missingParents = append(missingParents, parentHash)
        }
    }
    if len(missingParents) > 0 && disallowOrphans {
        str := fmt.Sprintf("Cannot process orphan blocks while the BFDisallowOrphans flag is raised %s", blockHash)
        return false, false, ruleError(ErrOrphanBlockIsNotAllowed, str)
    }

    // Handle the case of a block with a valid timestamp (non-delayed) which points to a delayed block.
    delay, isParentDelayed := dag.maxDelayOfParents(missingParents)
    if isParentDelayed {
        // Add Millisecond to ensure that parent process time will be after its child.
        delay += time.Millisecond
        // Add Nanosecond to ensure that parent process time will be after its child.
        delay += time.Nanosecond
        err := dag.addDelayedBlock(block, delay)
        if err != nil {
            return false, false, err
@@ -231,7 +209,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
    // The number K*2 was chosen since in peace times the anticone is limited to K blocks,
    // while some red block can make it a bit bigger, but much more than that indicates
    // there might be some problem with the netsync process.
    if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.Params.K*2 {
    if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 {
        log.Debugf("Adding orphan block %s. This is a normal part of the netsync process", blockHash)
    } else {
        log.Infof("Adding orphan block %s", blockHash)
@@ -263,8 +241,6 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
        }
    }

    dag.addBlockProcessingTimestamp()

    log.Debugf("Accepted block %s", blockHash)

    return false, false, nil
@@ -276,7 +252,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
    for _, parentHash := range parentHashes {
        if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
            isDelayed = true
            parentDelay := delayedParent.processTime.Sub(dag.Now())
            parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
            if parentDelay > delay {
                delay = parentDelay
            }
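The flag checks above all use the standard bitmask idiom; combining and testing flags looks like this (illustrative values, both constants appear in the code above):

```go
flags := blockdag.BFNoPoWCheck | blockdag.BFIsSync

// A flag is "raised" iff masking leaves exactly that bit set.
if flags&blockdag.BFIsSync == blockdag.BFIsSync {
    // netsync-specific handling, as in processBlockNoLock above
}
```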
@@ -3,16 +3,13 @@ package blockdag

import (
    "path/filepath"
    "testing"
    "time"

    "github.com/kaspanet/kaspad/util"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/daghash"
)

func TestProcessOrphans(t *testing.T) {
    dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
    dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
@@ -64,175 +61,11 @@ func TestProcessOrphans(t *testing.T) {
    }

    // Make sure that the child block had been rejected
    node, ok := dag.index.LookupNode(childBlock.Hash())
    if !ok {
    node := dag.index.LookupNode(childBlock.Hash())
    if node == nil {
        t.Fatalf("TestProcessOrphans: child block missing from block index")
    }
    if !dag.index.NodeStatus(node).KnownInvalid() {
        t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
    }
}

func TestProcessDelayedBlocks(t *testing.T) {
    // We use dag1 so we can build the test blocks with the proper
    // block header (UTXO commitment, acceptedIDMerkleRoot, etc.), and
    // then we use dag2 for the actual test.
    dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    isDAG1Open := true
    defer func() {
        if isDAG1Open {
            teardownFunc()
        }
    }()

    initialTime := dag1.Params.GenesisBlock.Header.Timestamp
    // Here we use a fake time source that returns a timestamp
    // one hour into the future to make delayedBlock artificially
    // valid.
    dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))

    delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.Params.GenesisBlock.BlockHash()}, nil)
    if err != nil {
        t.Fatalf("error in PrepareBlockForTest: %s", err)
    }

    blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance)*dag1.Params.TargetTimePerBlock + 5*time.Second
    delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)

    isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
            "is an orphan\n")
    }
    if isDelayed {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
            "is delayed\n")
    }

    delayedBlockChild, err := PrepareBlockForTest(dag1, []*daghash.Hash{delayedBlock.BlockHash()}, nil)
    if err != nil {
        t.Fatalf("error in PrepareBlockForTest: %s", err)
    }

    teardownFunc()
    isDAG1Open = false

    // Here the actual test begins. We add a delayed block and
    // its child and check that they are not added to the DAG,
    // and check that they're added only if we add a new block
    // after the delayed block timestamp is valid.
    dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc2()
    dag2.timeSource = newFakeTimeSource(initialTime)

    isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
            "is an orphan\n")
    }
    if !isDelayed {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
            "is not delayed\n")
    }

    if dag2.IsInDAG(delayedBlock.BlockHash()) {
        t.Errorf("dag.IsInDAG should return false for a delayed block")
    }
    if !dag2.IsKnownBlock(delayedBlock.BlockHash()) {
        t.Errorf("dag.IsKnownBlock should return true for a delayed block")
    }

    isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlockChild), BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
            "is an orphan\n")
    }
    if !isDelayed {
        t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
            "is not delayed\n")
    }

    if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
        t.Errorf("dag.IsInDAG should return false for a child of a delayed block")
    }
    if !dag2.IsKnownBlock(delayedBlockChild.BlockHash()) {
        t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
    }

    blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()}, nil)
    if err != nil {
        t.Fatalf("error in PrepareBlockForTest: %s", err)
    }
    isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockBeforeDelay), BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
            "is an orphan\n")
    }
    if isDelayed {
        t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
            "is delayed\n")
    }

    if dag2.IsInDAG(delayedBlock.BlockHash()) {
        t.Errorf("delayedBlock shouldn't be added to the DAG because its time hasn't been reached yet")
    }
    if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
        t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent is not in the DAG")
    }

    // We advance the clock to the point where the delayedBlock timestamp is valid.
    deviationTolerance := time.Duration(dag2.TimestampDeviationTolerance) * dag2.Params.TargetTimePerBlock
    timeUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.
        Add(-deviationTolerance).
        Sub(dag2.Now()) +
        time.Second
    dag2.timeSource = newFakeTimeSource(initialTime.Add(timeUntilDelayedBlockIsValid))

    blockAfterDelay, err := PrepareBlockForTest(dag2,
        []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()},
        nil)
    if err != nil {
        t.Fatalf("error in PrepareBlockForTest: %s", err)
    }
    isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockAfterDelay), BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned blockAfterDelay " +
            "is an orphan\n")
    }
    if isDelayed {
        t.Fatalf("ProcessBlock incorrectly returned blockAfterDelay " +
            "is delayed\n")
    }

    if !dag2.IsInDAG(delayedBlock.BlockHash()) {
        t.Fatalf("delayedBlock should be added to the DAG because its time has been reached")
    }
    if !dag2.IsInDAG(delayedBlockChild.BlockHash()) {
        t.Errorf("delayedBlockChild should be added to the DAG because its parent has been added to the DAG")
    }
}
File diff suppressed because it is too large
@@ -1,10 +1,7 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/daghash"
    "reflect"
    "strings"
    "testing"
)

@@ -13,20 +10,19 @@ func TestAddChild(t *testing.T) {
    // root -> a -> b -> c...
    // Create the root node of a new reachability tree
    root := newReachabilityTreeNode(&blockNode{})
    root.interval = newReachabilityInterval(1, 100)
    root.setInterval(newReachabilityInterval(1, 100))

    // Add a chain of child nodes just before a reindex occurs (2^6=64 < 100)
    currentTip := root
    for i := 0; i < 6; i++ {
        node := newReachabilityTreeNode(&blockNode{})
        modifiedNodes := newModifiedTreeNodes()
        err := currentTip.addChild(node, root, modifiedNodes)
        modifiedNodes, err := currentTip.addChild(node)
        if err != nil {
            t.Fatalf("TestAddChild: addChild failed: %s", err)
        }

        // Expect only the node and its parent to be affected
        expectedModifiedNodes := newModifiedTreeNodes(currentTip, node)
        expectedModifiedNodes := []*reachabilityTreeNode{currentTip, node}
        if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
            t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
                "want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
@@ -37,8 +33,7 @@ func TestAddChild(t *testing.T) {

    // Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128)
    lastChild := newReachabilityTreeNode(&blockNode{})
    modifiedNodes := newModifiedTreeNodes()
    err := currentTip.addChild(lastChild, root, modifiedNodes)
    modifiedNodes, err := currentTip.addChild(lastChild)
    if err != nil {
        t.Fatalf("TestAddChild: addChild failed: %s", err)
    }
@@ -49,23 +44,19 @@ func TestAddChild(t *testing.T) {
        t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
    }

    // Expect the tip to have an interval of 1 and remaining interval of 0 both before and after
    // Expect the tip to have an interval of 1 and remaining interval of 0
    tipInterval := lastChild.interval.size()
    if tipInterval != 1 {
        t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipInterval)
    }
    tipRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
    if tipRemainingIntervalBefore != 0 {
        t.Fatalf("TestAddChild: unexpected tip interval before size: want: 0, got: %d", tipRemainingIntervalBefore)
    }
    tipRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
    if tipRemainingIntervalAfter != 0 {
        t.Fatalf("TestAddChild: unexpected tip interval after size: want: 0, got: %d", tipRemainingIntervalAfter)
    tipRemainingInterval := lastChild.remainingInterval.size()
    if tipRemainingInterval != 0 {
        t.Fatalf("TestAddChild: unexpected tip interval size: want: 0, got: %d", tipRemainingInterval)
    }

    // Expect all nodes to be descendant nodes of root
    currentNode := currentTip
    for currentNode != root {
    for currentNode != nil {
        if !root.isAncestorOf(currentNode) {
            t.Fatalf("TestAddChild: currentNode is not a descendant of root")
        }
@@ -76,20 +67,19 @@ func TestAddChild(t *testing.T) {
    // root -> a, b, c...
    // Create the root node of a new reachability tree
    root = newReachabilityTreeNode(&blockNode{})
    root.interval = newReachabilityInterval(1, 100)
    root.setInterval(newReachabilityInterval(1, 100))

    // Add child nodes to root just before a reindex occurs (2^6=64 < 100)
    childNodes := make([]*reachabilityTreeNode, 6)
    for i := 0; i < len(childNodes); i++ {
        childNodes[i] = newReachabilityTreeNode(&blockNode{})
        modifiedNodes := newModifiedTreeNodes()
        err := root.addChild(childNodes[i], root, modifiedNodes)
        modifiedNodes, err := root.addChild(childNodes[i])
        if err != nil {
            t.Fatalf("TestAddChild: addChild failed: %s", err)
        }

        // Expect only the node and the root to be affected
        expectedModifiedNodes := newModifiedTreeNodes(root, childNodes[i])
        expectedModifiedNodes := []*reachabilityTreeNode{root, childNodes[i]}
        if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
            t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
                "want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
@@ -98,8 +88,7 @@ func TestAddChild(t *testing.T) {

    // Add another node to the root to trigger a reindex (100 < 2^7=128)
    lastChild = newReachabilityTreeNode(&blockNode{})
    modifiedNodes = newModifiedTreeNodes()
    err = root.addChild(lastChild, root, modifiedNodes)
    modifiedNodes, err = root.addChild(lastChild)
    if err != nil {
        t.Fatalf("TestAddChild: addChild failed: %s", err)
    }
@@ -110,18 +99,14 @@ func TestAddChild(t *testing.T) {
        t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
    }

    // Expect the last-added child to have an interval of 1 and remaining interval of 0 both before and after
    // Expect the last-added child to have an interval of 1 and remaining interval of 0
    lastChildInterval := lastChild.interval.size()
    if lastChildInterval != 1 {
        t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", lastChildInterval)
    }
    lastChildRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
    if lastChildRemainingIntervalBefore != 0 {
        t.Fatalf("TestAddChild: unexpected lastChild interval before size: want: 0, got: %d", lastChildRemainingIntervalBefore)
    }
    lastChildRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
    if lastChildRemainingIntervalAfter != 0 {
        t.Fatalf("TestAddChild: unexpected lastChild interval after size: want: 0, got: %d", lastChildRemainingIntervalAfter)
    lastChildRemainingInterval := lastChild.remainingInterval.size()
    if lastChildRemainingInterval != 0 {
        t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 0, got: %d", lastChildRemainingInterval)
    }

    // Expect all nodes to be descendant nodes of root
@@ -132,91 +117,6 @@ func TestAddChild(t *testing.T) {
    }
}

func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
    root := newReachabilityTreeNode(&blockNode{})
    currentTip := root
    const numberOfDescendants = 6
    descendants := make([]*reachabilityTreeNode, numberOfDescendants)
    for i := 0; i < numberOfDescendants; i++ {
        node := newReachabilityTreeNode(&blockNode{})
        err := currentTip.addChild(node, root, newModifiedTreeNodes())
        if err != nil {
            t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err)
        }
        descendants[i] = node
        currentTip = node
    }

    // Expect all descendants to be in the future of root
    for _, node := range descendants {
        if !root.isAncestorOf(node) {
            t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: node is not a descendant of root")
        }
    }

    if !root.isAncestorOf(root) {
        t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root")
    }
}

func TestIntervalContains(t *testing.T) {
    tests := []struct {
        name              string
        this, other       *reachabilityInterval
        thisContainsOther bool
    }{
        {
            name:              "this == other",
            this:              newReachabilityInterval(10, 100),
            other:             newReachabilityInterval(10, 100),
            thisContainsOther: true,
        },
        {
            name:              "this.start == other.start && this.end < other.end",
            this:              newReachabilityInterval(10, 90),
            other:             newReachabilityInterval(10, 100),
            thisContainsOther: false,
        },
        {
            name:              "this.start == other.start && this.end > other.end",
            this:              newReachabilityInterval(10, 100),
            other:             newReachabilityInterval(10, 90),
            thisContainsOther: true,
        },
        {
            name:              "this.start > other.start && this.end == other.end",
            this:              newReachabilityInterval(20, 100),
            other:             newReachabilityInterval(10, 100),
            thisContainsOther: false,
        },
        {
            name:              "this.start < other.start && this.end == other.end",
            this:              newReachabilityInterval(10, 100),
            other:             newReachabilityInterval(20, 100),
            thisContainsOther: true,
        },
        {
            name:              "this.start > other.start && this.end < other.end",
            this:              newReachabilityInterval(20, 90),
            other:             newReachabilityInterval(10, 100),
            thisContainsOther: false,
        },
        {
            name:              "this.start < other.start && this.end > other.end",
            this:              newReachabilityInterval(10, 100),
            other:             newReachabilityInterval(20, 90),
            thisContainsOther: true,
        },
    }

    for _, test := range tests {
        if thisContainsOther := test.this.contains(test.other); thisContainsOther != test.thisContainsOther {
            t.Errorf("test.this.contains(test.other) is expected to be %t but got %t",
                test.thisContainsOther, thisContainsOther)
        }
    }
}
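Every case in the table above is consistent with plain interval inclusion; a one-line statement of the relation, for reference (our formulation, not code from the repo):

```go
// thisContainsOther reports whether [thisStart, thisEnd] fully
// encloses [otherStart, otherEnd].
func thisContainsOther(thisStart, thisEnd, otherStart, otherEnd uint64) bool {
    return thisStart <= otherStart && otherEnd <= thisEnd
}
```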
func TestSplitFraction(t *testing.T) {
|
||||
tests := []struct {
|
||||
interval *reachabilityInterval
|
||||
@@ -422,15 +322,6 @@ func TestSplitWithExponentialBias(t *testing.T) {
|
||||
newReachabilityInterval(41, 10_000),
|
||||
},
|
||||
},
|
||||
{
|
||||
interval: newReachabilityInterval(1, 100_000),
|
||||
sizes: []uint64{31_000, 31_000, 30_001},
|
||||
expectedIntervals: []*reachabilityInterval{
|
||||
newReachabilityInterval(1, 35_000),
|
||||
newReachabilityInterval(35_001, 69_999),
|
||||
newReachabilityInterval(70_000, 100_000),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
@@ -445,605 +336,140 @@ func TestSplitWithExponentialBias(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasAncestorOf(t *testing.T) {
|
||||
treeNodes := futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(2, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
func TestIsInFuture(t *testing.T) {
|
||||
blocks := futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
treeNode *reachabilityTreeNode
|
||||
block *futureCoveringBlock
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)},
|
||||
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}},
|
||||
expectedResult: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
result := treeNodes.hasAncestorOf(test.treeNode)
|
||||
result := blocks.isInFuture(test.block)
|
||||
if result != test.expectedResult {
|
||||
t.Errorf("TestHasAncestorOf: unexpected result in test #%d. Want: %t, got: %t",
|
||||
t.Errorf("TestIsInFuture: unexpected result in test #%d. Want: %t, got: %t",
|
||||
i, test.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertNode(t *testing.T) {
|
||||
treeNodes := futureCoveringTreeNodeSet{
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
|
||||
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
|
||||
func TestInsertBlock(t *testing.T) {
|
||||
blocks := futureCoveringBlockSet{
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
|
||||
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
|
||||
}

    tests := []struct {
        toInsert       []*reachabilityTreeNode
        expectedResult futureCoveringTreeNodeSet
        toInsert       []*futureCoveringBlock
        expectedResult futureCoveringBlockSet
    }{
        {
            toInsert: []*reachabilityTreeNode{
                {interval: newReachabilityInterval(5, 7)},
            toInsert: []*futureCoveringBlock{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
            },
            expectedResult: futureCoveringTreeNodeSet{
                &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
                &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
                &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
                &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
            expectedResult: futureCoveringBlockSet{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
            },
        },
        {
            toInsert: []*reachabilityTreeNode{
                {interval: newReachabilityInterval(65, 78)},
            toInsert: []*futureCoveringBlock{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
            },
            expectedResult: futureCoveringTreeNodeSet{
                &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
                &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
                &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)},
                &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
            expectedResult: futureCoveringBlockSet{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
            },
        },
        {
            toInsert: []*reachabilityTreeNode{
                {interval: newReachabilityInterval(88, 97)},
            toInsert: []*futureCoveringBlock{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
            },
            expectedResult: futureCoveringTreeNodeSet{
                &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
                &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
                &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
                &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
                &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
            expectedResult: futureCoveringBlockSet{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
            },
        },
        {
            toInsert: []*reachabilityTreeNode{
                {interval: newReachabilityInterval(88, 97)},
                {interval: newReachabilityInterval(3000, 3010)},
            toInsert: []*futureCoveringBlock{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
            },
            expectedResult: futureCoveringTreeNodeSet{
                &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
                &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
                &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
                &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
                &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
                &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
                &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)},
            expectedResult: futureCoveringBlockSet{
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
                {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
            },
        },
    }

    for i, test := range tests {
        // Create a clone of treeNodes so that we have a clean start for every test
        treeNodesClone := make(futureCoveringTreeNodeSet, len(treeNodes))
        for i, treeNode := range treeNodes {
            treeNodesClone[i] = treeNode
        // Create a clone of blocks so that we have a clean start for every test
        blocksClone := make(futureCoveringBlockSet, len(blocks))
        for i, block := range blocks {
            blocksClone[i] = block
        }

        for _, treeNode := range test.toInsert {
            treeNodesClone.insertNode(treeNode)
        for _, block := range test.toInsert {
            blocksClone.insertBlock(block)
        }
        if !reflect.DeepEqual(treeNodesClone, test.expectedResult) {
            t.Errorf("TestInsertNode: unexpected result in test #%d. Want: %s, got: %s",
                i, test.expectedResult, treeNodesClone)
        }
    }
}
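
The insertion exercised above keeps the covering set ordered by interval start; the (65, 78) case also shows that an inserted node replaces an entry it covers. As a minimal, self-contained sketch of just the ordering invariant (assuming simplified semantics; the real insertNode/insertBlock also handle the covering/replacement case):

package main

import (
    "fmt"
    "sort"
)

// interval is a simplified stand-in for reachabilityInterval.
type interval struct{ start, end uint64 }

// insertOrdered inserts iv into set while keeping set sorted by start.
// This mirrors only the ordering half of the behavior tested above.
func insertOrdered(set []interval, iv interval) []interval {
    i := sort.Search(len(set), func(j int) bool { return set[j].start >= iv.start })
    set = append(set, interval{})
    copy(set[i+1:], set[i:])
    set[i] = iv
    return set
}

func main() {
    set := []interval{{1, 3}, {4, 67}, {657, 789}}
    set = insertOrdered(set, interval{88, 97})
    fmt.Println(set) // [{1 3} {4 67} {88 97} {657 789}]
}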

func TestSplitFractionErrors(t *testing.T) {
    interval := newReachabilityInterval(100, 200)

    // Negative fraction
    _, _, err := interval.splitFraction(-0.5)
    if err == nil {
        t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
            "didn't return an error for a negative fraction")
    }
    expectedErrSubstring := "fraction must be between 0 and 1"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
            "for a negative fraction. "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }

    // Fraction > 1
    _, _, err = interval.splitFraction(1.5)
    if err == nil {
        t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
            "didn't return an error for a fraction greater than 1")
    }
    expectedErrSubstring = "fraction must be between 0 and 1"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
            "for a fraction greater than 1. "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }

    // Splitting an empty interval
    emptyInterval := newReachabilityInterval(1, 0)
    _, _, err = emptyInterval.splitFraction(0.5)
    if err == nil {
        t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
            "didn't return an error for an empty interval")
    }
    expectedErrSubstring = "cannot split an empty interval"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
            "for an empty interval. "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }
}

func TestSplitExactErrors(t *testing.T) {
    interval := newReachabilityInterval(100, 199)

    // Sum of sizes greater than the size of the interval
    sizes := []uint64{50, 51}
    _, err := interval.splitExact(sizes)
    if err == nil {
        t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
            "didn't return an error for (sum of sizes) > (size of interval)")
    }
    expectedErrSubstring := "sum of sizes must be equal to the interval's size"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
            "for (sum of sizes) > (size of interval). "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }

    // Sum of sizes smaller than the size of the interval
    sizes = []uint64{50, 49}
    _, err = interval.splitExact(sizes)
    if err == nil {
        t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
            "didn't return an error for (sum of sizes) < (size of interval)")
    }
    expectedErrSubstring = "sum of sizes must be equal to the interval's size"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
            "for (sum of sizes) < (size of interval). "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }
}
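
For orientation, here is a minimal sketch of what an exact split plausibly does, assuming splitExact carves the closed interval into consecutive sub-intervals whose sizes must sum to the parent's size (exactly the error condition these tests assert); names and error text below are illustrative:

package main

import (
    "errors"
    "fmt"
)

type interval struct{ start, end uint64 }

func (iv interval) size() uint64 { return iv.end - iv.start + 1 }

// splitExact allocates consecutive sub-intervals of the given sizes.
func splitExact(iv interval, sizes []uint64) ([]interval, error) {
    sum := uint64(0)
    for _, s := range sizes {
        sum += s
    }
    if sum != iv.size() {
        return nil, errors.New("sum of sizes must be equal to the interval's size")
    }
    result := make([]interval, len(sizes))
    start := iv.start
    for i, s := range sizes {
        result[i] = interval{start, start + s - 1}
        start += s
    }
    return result, nil
}

func main() {
    parts, _ := splitExact(interval{100, 199}, []uint64{50, 50})
    fmt.Println(parts) // [{100 149} {150 199}]
}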

func TestSplitWithExponentialBiasErrors(t *testing.T) {
    interval := newReachabilityInterval(100, 199)

    // Sum of sizes greater than the size of the interval
    sizes := []uint64{50, 51}
    _, err := interval.splitWithExponentialBias(sizes)
    if err == nil {
        t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias " +
            "unexpectedly didn't return an error")
    }
    expectedErrSubstring := "sum of sizes must be less than or equal to the interval's size"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias "+
            "returned wrong error. Want: %s, got: %s", expectedErrSubstring, err)
    }
}

func TestReindexIntervalErrors(t *testing.T) {
    // Create a treeNode and give it size = 100
    treeNode := newReachabilityTreeNode(&blockNode{})
    treeNode.interval = newReachabilityInterval(0, 99)

    // Add a chain of 100 child treeNodes to treeNode
    var err error
    currentTreeNode := treeNode
    for i := 0; i < 100; i++ {
        childTreeNode := newReachabilityTreeNode(&blockNode{})
        err = currentTreeNode.addChild(childTreeNode, treeNode, newModifiedTreeNodes())
        if err != nil {
            break
        }
        currentTreeNode = childTreeNode
    }

    // At the 100th addChild we expect a reindex. This reindex should
    // fail because our initial treeNode only has size = 100, and the
    // reindex requires size > 100.
    // This simulates the case when (somehow) there are more than 2^64
    // blocks in the DAG, since the genesis block has size = 2^64.
    if err == nil {
        t.Fatalf("TestReindexIntervalErrors: reindexIntervals " +
            "unexpectedly didn't return an error")
    }
    if !strings.Contains(err.Error(), "missing tree parent during reindexing") {
        t.Fatalf("TestReindexIntervalErrors: reindexIntervals "+
            "returned an unexpected error: %s", err)
    }
}
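
The arithmetic behind this failure, assuming size() on a closed interval is end - start + 1: [0, 99] holds exactly 100 slots, so a chain of 100 descendants exhausts it, and the reindex triggered by the last addChild has no spare capacity anywhere above it.

package main

import "fmt"

func size(start, end uint64) uint64 { return end - start + 1 }

func main() {
    fmt.Println(size(0, 99)) // 100 -- exhausted by a 100-deep chain, so the reindex must fail
}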

func BenchmarkReindexInterval(b *testing.B) {
    for i := 0; i < b.N; i++ {
        b.StopTimer()
        root := newReachabilityTreeNode(&blockNode{})

        const subTreeSize = 70000
        // We set the interval of the root to subTreeSize*2 because
        // its first child gets half of the interval, so a reindex
        // from the root should happen after adding subTreeSize
        // nodes.
        root.interval = newReachabilityInterval(0, subTreeSize*2)

        currentTreeNode := root
        for i := 0; i < subTreeSize; i++ {
            childTreeNode := newReachabilityTreeNode(&blockNode{})
            err := currentTreeNode.addChild(childTreeNode, root, newModifiedTreeNodes())
            if err != nil {
                b.Fatalf("addChild: %s", err)
            }

            currentTreeNode = childTreeNode
        }

        originalRemainingInterval := *root.remainingIntervalAfter()
        // After we added subTreeSize nodes, adding the next
        // node should lead to a reindex from root.
        fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
        b.StartTimer()
        err := currentTreeNode.addChild(fullReindexTriggeringNode, root, newModifiedTreeNodes())
        b.StopTimer()
        if err != nil {
            b.Fatalf("addChild: %s", err)
        }

        if *root.remainingIntervalAfter() == originalRemainingInterval {
            b.Fatal("Expected a reindex from root, but it didn't happen")
        }
    }
}

func TestFutureCoveringTreeNodeSetString(t *testing.T) {
    treeNodeA := newReachabilityTreeNode(&blockNode{})
    treeNodeA.interval = newReachabilityInterval(123, 456)
    treeNodeB := newReachabilityTreeNode(&blockNode{})
    treeNodeB.interval = newReachabilityInterval(457, 789)
    futureCoveringSet := futureCoveringTreeNodeSet{treeNodeA, treeNodeB}

    str := futureCoveringSet.String()
    expectedStr := "[123,456][457,789]"
    if str != expectedStr {
        t.Fatalf("TestFutureCoveringTreeNodeSetString: unexpected "+
            "string. Want: %s, got: %s", expectedStr, str)
    }
}

func TestReachabilityTreeNodeString(t *testing.T) {
    treeNodeA := newReachabilityTreeNode(&blockNode{})
    treeNodeA.interval = newReachabilityInterval(100, 199)
    treeNodeB1 := newReachabilityTreeNode(&blockNode{})
    treeNodeB1.interval = newReachabilityInterval(100, 150)
    treeNodeB2 := newReachabilityTreeNode(&blockNode{})
    treeNodeB2.interval = newReachabilityInterval(150, 199)
    treeNodeC := newReachabilityTreeNode(&blockNode{})
    treeNodeC.interval = newReachabilityInterval(100, 149)
    treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
    treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}

    str := treeNodeA.String()
    expectedStr := "[100,149]\n[100,150][150,199]\n[100,199]"
    if str != expectedStr {
        t.Fatalf("TestReachabilityTreeNodeString: unexpected "+
            "string. Want: %s, got: %s", expectedStr, str)
    }
}

func TestIsInPast(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestIsInPast", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("TestIsInPast: Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    // Add a chain of two blocks above the genesis. This will be the
    // selected parent chain.
    blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
    blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)

    // Add another block above the genesis
    blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
    nodeC, ok := dag.index.LookupNode(blockC.BlockHash())
    if !ok {
        t.Fatalf("TestIsInPast: block C is not in the block index")
    }

    // Add a block whose parents are the two tips
    blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, nil)
    nodeD, ok := dag.index.LookupNode(blockD.BlockHash())
    if !ok {
        t.Fatalf("TestIsInPast: block D is not in the block index")
    }

    // Make sure that node C is in the past of node D
    isInPast, err := dag.reachabilityTree.isInPast(nodeC, nodeD)
    if err != nil {
        t.Fatalf("TestIsInPast: isInPast unexpectedly failed: %s", err)
    }
    if !isInPast {
        t.Fatalf("TestIsInPast: node C is unexpectedly not in the past of node D")
    }
}
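
Interval-based reachability reduces tree-ancestry queries to interval containment; isInPast presumably also consults the future covering set for edges that are not part of the tree. A standalone sketch of the containment half (simplified; not the actual kaspad implementation):

package main

import "fmt"

type interval struct{ start, end uint64 }

// contains reports whether outer fully contains inner -- the basic
// ancestor test in an interval-based reachability tree.
func contains(outer, inner interval) bool {
    return outer.start <= inner.start && inner.end <= outer.end
}

func main() {
    genesis := interval{0, 1000}
    nodeC := interval{10, 20}
    fmt.Println(contains(genesis, nodeC)) // true: nodeC lies in genesis' subtree
}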

func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot",
        true, Config{DAGParams: &dagconfig.SimnetParams})
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    // Set the reindex window to a low number to make this test run fast
    originalReachabilityReindexWindow := reachabilityReindexWindow
    reachabilityReindexWindow = 10
    defer func() {
        reachabilityReindexWindow = originalReachabilityReindexWindow
    }()

    // Add a block on top of the genesis block
    chainRootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)

    // Add a chain of reachabilityReindexWindow blocks above chainRootBlock.
    // This should move the reindex root
    chainRootBlockTipHash := chainRootBlock.BlockHash()
    for i := uint64(0); i < reachabilityReindexWindow; i++ {
        chainBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chainRootBlockTipHash}, nil)
        chainRootBlockTipHash = chainBlock.BlockHash()
    }

    // Add another block over genesis
    PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
}

func TestUpdateReindexRoot(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    // Set the reindex window to a low number to make this test run fast
    originalReachabilityReindexWindow := reachabilityReindexWindow
    reachabilityReindexWindow = 10
    defer func() {
        reachabilityReindexWindow = originalReachabilityReindexWindow
    }()

    // Add two blocks on top of the genesis block
    chain1RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
    chain2RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)

    // Add a chain of reachabilityReindexWindow - 1 blocks above chain1RootBlock and
    // chain2RootBlock, respectively. This should not move the reindex root
    chain1RootBlockTipHash := chain1RootBlock.BlockHash()
    chain2RootBlockTipHash := chain2RootBlock.BlockHash()
    genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
        chain1Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
        chain1RootBlockTipHash = chain1Block.BlockHash()

        chain2Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain2RootBlockTipHash}, nil)
        chain2RootBlockTipHash = chain2Block.BlockHash()

        if dag.reachabilityTree.reindexRoot != genesisTreeNode {
            t.Fatalf("reindex root unexpectedly moved")
        }
    }

    // Add another block over chain1. This will move the reindex root to chain1RootBlock
    PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)

    // Make sure that chain1RootBlock is now the reindex root
    chain1RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain1RootBlock.BlockHash())
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    if dag.reachabilityTree.reindexRoot != chain1RootTreeNode {
        t.Fatalf("chain1RootBlock is not the reindex root after reindex")
    }

    // Make sure that tight intervals have been applied to chain2. Since
    // we added reachabilityReindexWindow-1 blocks to chain2, the size
    // of the interval at its root should be equal to reachabilityReindexWindow
    chain2RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain2RootBlock.BlockHash())
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    if chain2RootTreeNode.interval.size() != reachabilityReindexWindow {
        t.Fatalf("got unexpected chain2RootNode interval. Want: %d, got: %d",
            reachabilityReindexWindow, chain2RootTreeNode.interval.size())
    }

    // Make sure that the rest of the interval has been allocated to
    // chain1RootNode, minus slack from both sides
    expectedChain1RootIntervalSize := genesisTreeNode.interval.size() - 1 -
        chain2RootTreeNode.interval.size() - 2*reachabilityReindexSlack
    if chain1RootTreeNode.interval.size() != expectedChain1RootIntervalSize {
        t.Fatalf("got unexpected chain1RootNode interval. Want: %d, got: %d",
            expectedChain1RootIntervalSize, chain1RootTreeNode.interval.size())
    }
}
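
To make the allocation check above concrete, the same arithmetic with small illustrative numbers (the real genesis interval is on the order of 2^64, and the slack value here is assumed, not read from the code):

package main

import "fmt"

func main() {
    const genesisSize = uint64(1000) // illustrative, not the real 2^64-scale value
    const reindexWindow = uint64(10) // as set by this test
    const slack = uint64(5)          // assumed slack value, for illustration only

    // chain2Root keeps a tight reindexWindow-sized interval;
    // chain1Root receives everything else, minus slack on both sides.
    chain1RootSize := genesisSize - 1 - reindexWindow - 2*slack
    fmt.Println(chain1RootSize) // 979
}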

func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestReindexIntervalsEarlierThanReindexRoot", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    // Set the reindex window and slack to low numbers to make this test
    // run fast
    originalReachabilityReindexWindow := reachabilityReindexWindow
    originalReachabilityReindexSlack := reachabilityReindexSlack
    reachabilityReindexWindow = 10
    reachabilityReindexSlack = 5
    defer func() {
        reachabilityReindexWindow = originalReachabilityReindexWindow
        reachabilityReindexSlack = originalReachabilityReindexSlack
    }()

    // Add three children to the genesis: leftBlock, centerBlock, rightBlock
    leftBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
    centerBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
    rightBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)

    // Add a chain of reachabilityReindexWindow blocks above centerBlock.
    // This will move the reindex root to centerBlock
    centerTipHash := centerBlock.BlockHash()
    for i := uint64(0); i < reachabilityReindexWindow; i++ {
        block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{centerTipHash}, nil)
        centerTipHash = block.BlockHash()
    }

    // Make sure that centerBlock is now the reindex root
    centerTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(centerBlock.BlockHash())
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    if dag.reachabilityTree.reindexRoot != centerTreeNode {
        t.Fatalf("centerBlock is not the reindex root after reindex")
    }

    // Get the current interval for leftBlock. The reindex should have
    // resulted in a tight interval there
    leftTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(leftBlock.BlockHash())
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    if leftTreeNode.interval.size() != 1 {
        t.Fatalf("leftBlock interval not tight after reindex")
    }

    // Get the current interval for rightBlock. The reindex should have
    // resulted in a tight interval there
    rightTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(rightBlock.BlockHash())
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    if rightTreeNode.interval.size() != 1 {
        t.Fatalf("rightBlock interval not tight after reindex")
    }

    // Get the current interval for centerBlock. Its interval should be:
    // genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
    genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
    if err != nil {
        t.Fatalf("failed to get tree node: %s", err)
    }
    expectedCenterInterval := genesisTreeNode.interval.size() - 1 -
        leftTreeNode.interval.size() - reachabilityReindexSlack -
        rightTreeNode.interval.size() - reachabilityReindexSlack
    if centerTreeNode.interval.size() != expectedCenterInterval {
        t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
            expectedCenterInterval, centerTreeNode.interval.size())
    }

    // Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
    // Each addition will trigger a below-reindex-root reindex. We
    // expect the centerInterval to shrink by 1 each time, but its child
    // to remain unaffected
    treeChildOfCenterBlock := centerTreeNode.children[0]
    treeChildOfCenterBlockOriginalIntervalSize := treeChildOfCenterBlock.interval.size()
    leftTipHash := leftBlock.BlockHash()
    for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
        block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{leftTipHash}, nil)
        leftTipHash = block.BlockHash()

        expectedCenterInterval--
        if centerTreeNode.interval.size() != expectedCenterInterval {
            t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
                expectedCenterInterval, centerTreeNode.interval.size())
        }

        if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
            t.Fatalf("the interval of centerBlock's child unexpectedly changed")
        }
    }

    // Add a chain of reachabilityReindexWindow - 1 blocks above rightBlock.
    // Each addition will trigger a below-reindex-root reindex. We
    // expect the centerInterval to shrink by 1 each time, but its child
    // to remain unaffected
    rightTipHash := rightBlock.BlockHash()
    for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
        block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{rightTipHash}, nil)
        rightTipHash = block.BlockHash()

        expectedCenterInterval--
        if centerTreeNode.interval.size() != expectedCenterInterval {
            t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
                expectedCenterInterval, centerTreeNode.interval.size())
        }

        if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
            t.Fatalf("the interval of centerBlock's child unexpectedly changed")
        if !reflect.DeepEqual(blocksClone, test.expectedResult) {
            t.Errorf("TestInsertBlock: unexpected result in test #%d. Want: %s, got: %s",
                i, test.expectedResult, blocksClone)
        }
    }
}

@@ -3,16 +3,15 @@ package blockdag
import (
    "bytes"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
    "github.com/pkg/errors"
    "io"
)

type reachabilityData struct {
    treeNode          *reachabilityTreeNode
    futureCoveringSet futureCoveringTreeNodeSet
    futureCoveringSet futureCoveringBlockSet
}

type reachabilityStore struct {
@@ -29,7 +28,7 @@ func newReachabilityStore(dag *BlockDAG) *reachabilityStore {
    }
}

func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) error {
    // load the reachability data from DB to store.loaded
    node := treeNode.blockNode
    _, exists := store.reachabilityDataByHash(node.hash)
@@ -39,13 +38,14 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {

    store.loaded[*node.hash].treeNode = treeNode
    store.setBlockAsDirty(node.hash)
    return nil
}

func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringTreeNodeSet) error {
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
    // load the reachability data from DB to store.loaded
    _, exists := store.reachabilityDataByHash(node.hash)
    if !exists {
        return reachabilityNotFoundError(node.hash)
        return reachabilityNotFoundError(node)
    }

    store.loaded[*node.hash].futureCoveringSet = futureCoveringSet
@@ -57,26 +57,22 @@ func (store *reachabilityStore) setBlockAsDirty(blockHash *daghash.Hash) {
    store.dirty[*blockHash] = struct{}{}
}

func reachabilityNotFoundError(hash *daghash.Hash) error {
    return errors.Errorf("couldn't find reachability data for block %s", hash)
func reachabilityNotFoundError(node *blockNode) error {
    return errors.Errorf("Couldn't find reachability data for block %s", node.hash)
}

func (store *reachabilityStore) treeNodeByBlockHash(hash *daghash.Hash) (*reachabilityTreeNode, error) {
    reachabilityData, exists := store.reachabilityDataByHash(hash)
func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
    reachabilityData, exists := store.reachabilityDataByHash(node.hash)
    if !exists {
        return nil, reachabilityNotFoundError(hash)
        return nil, reachabilityNotFoundError(node)
    }
    return reachabilityData.treeNode, nil
}

func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
    return store.treeNodeByBlockHash(node.hash)
}

func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringTreeNodeSet, error) {
func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringBlockSet, error) {
    reachabilityData, exists := store.reachabilityDataByHash(node.hash)
    if !exists {
        return nil, reachabilityNotFoundError(node.hash)
        return nil, reachabilityNotFoundError(node)
    }
    return reachabilityData.futureCoveringSet, nil
}
@@ -87,15 +83,14 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
}

// flushToDB writes all dirty reachability data to the database.
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
    if len(store.dirty) == 0 {
        return nil
    }

    for hash := range store.dirty {
        hash := hash // Copy hash to a new variable to avoid passing the same pointer
        reachabilityData := store.loaded[hash]
        err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
        err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
        if err != nil {
            return err
        }
@@ -107,25 +102,22 @@ func (store *reachabilityStore) clearDirtyEntries() {
    store.dirty = make(map[daghash.Hash]struct{})
}

func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
func (store *reachabilityStore) init(dbTx database.Tx) error {
    bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)

    // TODO: (Stas) This is a quick and dirty hack.
    // We iterate over the entire bucket twice:
    // * First, populate the loaded set with all entries
    // * Second, connect the parent/children pointers in each entry
    //   with other nodes, which are now guaranteed to exist
    cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
    if err != nil {
        return err
    }
    defer cursor.Close()

    cursor := bucket.Cursor()
    for ok := cursor.First(); ok; ok = cursor.Next() {
        err := store.initReachabilityData(cursor)
        if err != nil {
            return err
        }
    }

    cursor = bucket.Cursor()
    for ok := cursor.First(); ok; ok = cursor.Next() {
        err := store.loadReachabilityDataFromCursor(cursor)
        if err != nil {
@@ -136,12 +128,7 @@ func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
}

func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
    key, err := cursor.Key()
    if err != nil {
        return err
    }

    hash, err := daghash.NewHash(key.Suffix())
    hash, err := daghash.NewHash(cursor.Key())
    if err != nil {
        return err
    }
@@ -154,12 +141,7 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
}

func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
    key, err := cursor.Key()
    if err != nil {
        return err
    }

    hash, err := daghash.NewHash(key.Suffix())
    hash, err := daghash.NewHash(cursor.Key())
    if err != nil {
        return err
    }
@@ -169,34 +151,26 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
        return errors.Errorf("cannot find reachability data for block hash: %s", hash)
    }

    serializedReachabilityData, err := cursor.Value()
    if err != nil {
        return err
    }

    err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
    err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
    if err != nil {
        return err
    }

    // Connect the treeNode with its blockNode
    reachabilityData.treeNode.blockNode, ok = store.dag.index.LookupNode(hash)
    if !ok {
        return errors.Errorf("block %s does not exist in the DAG", hash)
    }
    reachabilityData.treeNode.blockNode = store.dag.index.LookupNode(hash)

    return nil
}

// storeReachabilityData stores the reachability data to the database.
// dbStoreReachabilityData stores the reachability data to the database.
// This overwrites the current entry if there exists one.
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
    serializedReachabilityData, err := store.serializeReachabilityData(reachabilityData)
    if err != nil {
        return err
    }

    return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilityData)
    return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilityData)
}

func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {
@@ -219,26 +193,32 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab
        return err
    }

    // Serialize the remaining interval
    err = store.serializeReachabilityInterval(w, treeNode.remainingInterval)
    if err != nil {
        return err
    }

    // Serialize the parent
    // If this is the genesis block, write the zero hash instead
    parentHash := &daghash.ZeroHash
    if treeNode.parent != nil {
        parentHash = treeNode.parent.blockNode.hash
    }
    err = domainmessage.WriteElement(w, parentHash)
    err = wire.WriteElement(w, parentHash)
    if err != nil {
        return err
    }

    // Serialize the amount of children
    err = domainmessage.WriteVarInt(w, uint64(len(treeNode.children)))
    err = wire.WriteVarInt(w, uint64(len(treeNode.children)))
    if err != nil {
        return err
    }

    // Serialize the children
    for _, child := range treeNode.children {
        err = domainmessage.WriteElement(w, child.blockNode.hash)
        err = wire.WriteElement(w, child.blockNode.hash)
        if err != nil {
            return err
        }
@@ -249,13 +229,13 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab

func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, interval *reachabilityInterval) error {
    // Serialize start
    err := domainmessage.WriteElement(w, interval.start)
    err := wire.WriteElement(w, interval.start)
    if err != nil {
        return err
    }

    // Serialize end
    err = domainmessage.WriteElement(w, interval.end)
    err = wire.WriteElement(w, interval.end)
    if err != nil {
        return err
    }
@@ -263,16 +243,16 @@ func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, inter
    return nil
}

func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringTreeNodeSet) error {
func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringBlockSet) error {
    // Serialize the set size
    err := domainmessage.WriteVarInt(w, uint64(len(futureCoveringSet)))
    err := wire.WriteVarInt(w, uint64(len(futureCoveringSet)))
    if err != nil {
        return err
    }

    // Serialize each node in the set
    for _, node := range futureCoveringSet {
        err = domainmessage.WriteElement(w, node.blockNode.hash)
    // Serialize each block in the set
    for _, block := range futureCoveringSet {
        err = wire.WriteElement(w, block.blockNode.hash)
        if err != nil {
            return err
        }
@@ -309,10 +289,17 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
    }
    destination.treeNode.interval = interval

    // Deserialize the remaining interval
    remainingInterval, err := store.deserializeReachabilityInterval(r)
    if err != nil {
        return err
    }
    destination.treeNode.remainingInterval = remainingInterval

    // Deserialize the parent
    // If this is the zero hash, this node is the genesis and as such doesn't have a parent
    parentHash := &daghash.Hash{}
    err = domainmessage.ReadElement(r, parentHash)
    err = wire.ReadElement(r, parentHash)
    if err != nil {
        return err
    }
@@ -325,7 +312,7 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
    }

    // Deserialize the amount of children
    childCount, err := domainmessage.ReadVarInt(r)
    childCount, err := wire.ReadVarInt(r)
    if err != nil {
        return err
    }
@@ -334,7 +321,7 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
    children := make([]*reachabilityTreeNode, childCount)
    for i := uint64(0); i < childCount; i++ {
        childHash := &daghash.Hash{}
        err = domainmessage.ReadElement(r, childHash)
        err = wire.ReadElement(r, childHash)
        if err != nil {
            return err
        }
@@ -354,7 +341,7 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r

    // Deserialize start
    start := uint64(0)
    err := domainmessage.ReadElement(r, &start)
    err := wire.ReadElement(r, &start)
    if err != nil {
        return nil, err
    }
@@ -362,7 +349,7 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r

    // Deserialize end
    end := uint64(0)
    err = domainmessage.ReadElement(r, &end)
    err = wire.ReadElement(r, &end)
    if err != nil {
        return nil, err
    }
@@ -373,24 +360,31 @@ func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*r

func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destination *reachabilityData) error {
    // Deserialize the set size
    setSize, err := domainmessage.ReadVarInt(r)
    setSize, err := wire.ReadVarInt(r)
    if err != nil {
        return err
    }

    // Deserialize each block in the set
    futureCoveringSet := make(futureCoveringTreeNodeSet, setSize)
    futureCoveringSet := make(futureCoveringBlockSet, setSize)
    for i := uint64(0); i < setSize; i++ {
        blockHash := &daghash.Hash{}
        err = domainmessage.ReadElement(r, blockHash)
        err = wire.ReadElement(r, blockHash)
        if err != nil {
            return err
        }
        blockNode := store.dag.index.LookupNode(blockHash)
        if blockNode == nil {
            return errors.Errorf("blockNode not found for hash %s", blockHash)
        }
        blockReachabilityData, ok := store.reachabilityDataByHash(blockHash)
        if !ok {
            return errors.Errorf("block reachability data not found for hash: %s", blockHash)
        }
        futureCoveringSet[i] = blockReachabilityData.treeNode
        futureCoveringSet[i] = &futureCoveringBlock{
            blockNode: blockNode,
            treeNode:  blockReachabilityData.treeNode,
        }
    }
    destination.futureCoveringSet = futureCoveringSet
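
The wire layout both variants write is a varint element count followed by one 32-byte hash per element. A standalone round-trip sketch using Go's uvarint as a stand-in for the real WriteVarInt/WriteElement helpers (those use a Bitcoin-style encoding, so this is illustrative only):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    hashes := [][32]byte{{1}, {2}}

    // Serialize: count, then the raw hashes.
    var buf bytes.Buffer
    sizeBytes := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(sizeBytes, uint64(len(hashes)))
    buf.Write(sizeBytes[:n])
    for _, h := range hashes {
        buf.Write(h[:])
    }

    // Deserialize: read the count back, then count * 32 hash bytes.
    r := bytes.NewReader(buf.Bytes())
    count, _ := binary.ReadUvarint(r)
    fmt.Println(count, buf.Len()) // 2 65 (1 count byte + 2*32 hash bytes)
}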

@@ -9,15 +9,15 @@ import (
    "runtime"
    "time"

    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/txscript"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/wire"
)

// txValidateItem holds a transaction along with which input to validate.
type txValidateItem struct {
    txInIndex int
    txIn      *domainmessage.TxIn
    txIn      *wire.TxIn
    tx        *util.Tx
}

@@ -126,7 +126,7 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
    // Start up validation handlers that are used to asynchronously
    // validate each transaction input.
    for i := 0; i < maxGoRoutines; i++ {
        spawn("txValidator.validateHandler", v.validateHandler)
        spawn(v.validateHandler)
    }
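
The spawn loop above is a standard worker-pool fan-out. A self-contained sketch of the pattern, with illustrative names standing in for validateHandler and its work channel (not the actual kaspad internals):

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    items := make(chan int, 8)
    var wg sync.WaitGroup
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go func() { // stands in for spawn("txValidator.validateHandler", ...)
            defer wg.Done()
            for item := range items {
                fmt.Println("validated input", item)
            }
        }()
    }
    for i := 0; i < 8; i++ {
        items <- i
    }
    close(items)
    wg.Wait()
}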

    // Validate each of the inputs. The quit channel is closed when any
@@ -179,6 +179,11 @@ func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscr
// ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines.
func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
    // Don't validate coinbase transaction scripts.
    if tx.IsCoinBase() {
        return nil
    }

    // Collect all of the transaction inputs and required information for
    // validation.
    txIns := tx.MsgTx().TxIn
@@ -208,6 +213,10 @@ func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.T
    }
    txValItems := make([]*txValidateItem, 0, numInputs)
    for _, tx := range transactions {
        // Skip coinbase transactions.
        if tx.IsCoinBase() {
            continue
        }
        for txInIdx, txIn := range tx.MsgTx().TxIn {
            txVI := &txValidateItem{
                txInIndex: txInIdx,

@@ -4,22 +4,32 @@ import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/pkg/errors"

    "github.com/kaspanet/kaspad/util"

    "github.com/kaspanet/kaspad/domainmessage"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "github.com/kaspanet/kaspad/wire"
)

// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
    db database.DB
}

func newSubnetworkStore(db database.DB) *SubnetworkStore {
    return &SubnetworkStore{
        db: db,
    }
}

// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on it.
// This function returns an error if one or more transactions are invalid
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
    subnetworkRegistryTxs := make([]*domainmessage.MsgTx, 0)
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
    subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
    for _, tx := range txs {
        msgTx := tx.MsgTx()

@@ -40,13 +50,13 @@ func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
        if err != nil {
            return err
        }
        exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
        sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
        if err != nil {
            return err
        }
        if !exists {
        if sNet == nil {
            createdSubnetwork := newSubnetwork(registryTx)
            err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
            err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
            if err != nil {
                return errors.Errorf("failed registering subnetwork "+
                    "for tx '%s': %s", registryTx.TxHash(), err)
@@ -60,7 +70,7 @@ func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
// validateSubnetworkRegistryTransaction makes sure that a given subnetwork registry
// transaction is valid. Such a transaction is valid iff:
// - Its entire payload is a uint64 (8 bytes)
func validateSubnetworkRegistryTransaction(tx *domainmessage.MsgTx) error {
func validateSubnetworkRegistryTransaction(tx *wire.MsgTx) error {
    if len(tx.Payload) != 8 {
        return ruleError(ErrSubnetworkRegistry, fmt.Sprintf("validation failed: subnetwork registry "+
            "tx '%s' has an invalid payload", tx.TxHash()))
@@ -70,58 +80,85 @@ func validateSubnetworkRegistryTransaction(tx *domainmessage.MsgTx) error {
}

// TxToSubnetworkID creates a subnetwork ID from a subnetwork registry transaction
func TxToSubnetworkID(tx *domainmessage.MsgTx) (*subnetworkid.SubnetworkID, error) {
func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
    txHash := tx.TxHash()
    return subnetworkid.New(util.Hash160(txHash[:]))
}
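
In btcd-derived codebases, util.Hash160 is conventionally RIPEMD-160 over SHA-256; assuming kaspad follows that convention, the derivation above looks roughly like this standalone sketch (the all-zero transaction hash is purely illustrative):

package main

import (
    "crypto/sha256"
    "fmt"

    "golang.org/x/crypto/ripemd160"
)

// hash160 computes RIPEMD-160(SHA-256(b)), the assumed Hash160.
func hash160(b []byte) []byte {
    sha := sha256.Sum256(b)
    h := ripemd160.New()
    h.Write(sha[:])
    return h.Sum(nil)
}

func main() {
    txHash := make([]byte, 32)           // illustrative all-zero tx hash
    fmt.Printf("%x\n", hash160(txHash)) // 20 bytes of subnetwork ID material
}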

// fetchSubnetwork returns a registered subnetwork.
func (dag *BlockDAG) fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
    serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dag.databaseContext, subnetworkID)
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
    var sNet *subnetwork
    var err error
    dbErr := s.db.View(func(dbTx database.Tx) error {
        sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
        return nil
    })
    if dbErr != nil {
        return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
    }
    if err != nil {
        return nil, err
        return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
    }

    subnet, err := deserializeSubnetwork(serializedSubnetwork)
    if err != nil {
        return nil, err
    }

    return subnet, nil
    return sNet, nil
}

// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (dag *BlockDAG) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
    sNet, err := dag.fetchSubnetwork(subnetworkID)
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
    sNet, err := s.subnetwork(subnetworkID)
    if err != nil {
        return 0, err
    }
    if sNet == nil {
        return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
    }

    return sNet.gasLimit, nil
}

func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
    // Serialize the subnetwork
    serializedSubnetwork, err := serializeSubnetwork(network)
    if err != nil {
        return errors.Errorf("failed to serialize sub-network '%s': %s", subnetworkID, err)
    }

    return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
    // Store the subnetwork
    subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
    err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
    if err != nil {
        return errors.Errorf("failed to write sub-network '%s': %s", subnetworkID, err)
    }

    return nil
}

// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
    bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
    serializedSubnetwork := bucket.Get(subnetworkID[:])
    if serializedSubnetwork == nil {
        return nil, nil
    }

    return deserializeSubnetwork(serializedSubnetwork)
}

type subnetwork struct {
    gasLimit uint64
}

func newSubnetwork(tx *domainmessage.MsgTx) *subnetwork {
func newSubnetwork(tx *wire.MsgTx) *subnetwork {
    return &subnetwork{
        gasLimit: ExtractGasLimit(tx),
    }
}

// ExtractGasLimit extracts the gas limit from the transaction payload
func ExtractGasLimit(tx *domainmessage.MsgTx) uint64 {
func ExtractGasLimit(tx *wire.MsgTx) uint64 {
    return binary.LittleEndian.Uint64(tx.Payload[:8])
}
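
A quick usage sketch of the payload convention ExtractGasLimit relies on: the gas limit occupies the first 8 payload bytes, little-endian (21000 is just an illustrative value):

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    payload := make([]byte, 8)
    binary.LittleEndian.PutUint64(payload, 21000)
    fmt.Println(binary.LittleEndian.Uint64(payload[:8])) // 21000
}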

@@ -1,60 +0,0 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/util/mstime"
    "time"
)

const syncRateWindowDuration = 15 * time.Minute

// addBlockProcessingTimestamp adds the last block processing timestamp in order to measure the recent sync rate.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) addBlockProcessingTimestamp() {
    now := mstime.Now()
    dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
    dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
}

// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration
// from dag.recentBlockProcessingTimestamps
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() {
    dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
}

func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []mstime.Time {
    minTime := mstime.Now().Add(-syncRateWindowDuration)
    windowStartIndex := len(dag.recentBlockProcessingTimestamps)
    for i, processTime := range dag.recentBlockProcessingTimestamps {
        if processTime.After(minTime) {
            windowStartIndex = i
            break
        }
    }
    return dag.recentBlockProcessingTimestamps[windowStartIndex:]
}

// syncRate returns the rate of processed
// blocks in the last syncRateWindowDuration
// duration.
func (dag *BlockDAG) syncRate() float64 {
    dag.RLock()
    defer dag.RUnlock()
    return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds()
}

// IsSyncRateBelowThreshold checks whether the sync rate
// is below the expected threshold.
func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
    if dag.uptime() < syncRateWindowDuration {
        return false
    }

    return dag.syncRate() < 1/dag.Params.TargetTimePerBlock.Seconds()*maxDeviation
}

func (dag *BlockDAG) uptime() time.Duration {
    return mstime.Now().Sub(dag.startTime)
}
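
Worked numbers for the threshold in IsSyncRateBelowThreshold, with an illustrative 1-second target block time and maxDeviation of 0.05: the node counts as lagging once it processes fewer than 0.05 blocks per second over the 15-minute window, i.e. fewer than 45 blocks.

package main

import "fmt"

func main() {
    const targetTimePerBlockSeconds = 1.0 // illustrative
    const maxDeviation = 0.05             // illustrative
    const windowSeconds = 15 * 60.0

    threshold := 1 / targetTimePerBlockSeconds * maxDeviation
    fmt.Println(threshold)                 // 0.05 blocks per second
    fmt.Println(threshold * windowSeconds) // 45 blocks per window
}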
|
||||
@@ -5,28 +5,47 @@ package blockdag
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ldb"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
const (
|
||||
// testDbType is the database backend type to use for the tests.
|
||||
testDbType = "ffldb"
|
||||
|
||||
// testDbRoot is the root directory used to create all test databases.
|
||||
testDbRoot = "testdbs"
|
||||
|
||||
// blockDataNet is the expected network in the test block data.
|
||||
	blockDataNet = wire.Mainnet
)

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
	supportedDrivers := database.SupportedDrivers()
	for _, driver := range supportedDrivers {
		if dbType == driver {
			return true
		}
	}

	return false
}

// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
@@ -40,66 +59,61 @@ func FileExists(name string) bool {
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
// The openDB parameter instructs DAGSetup whether or not to also open the
// database. Setting it to false is useful in tests that handle database
// opening/closing by themselves.
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
	}

	var teardown func()

	// To make sure that the teardown function is not called before any goroutines finished to run -
	// overwrite `spawn` to count the number of running goroutines
	spawnWaitGroup := sync.WaitGroup{}
	realSpawn := spawn
	spawn = func(name string, f func()) {
	spawn = func(f func()) {
		spawnWaitGroup.Add(1)
		realSpawn(name, func() {
		realSpawn(func() {
			f()
			spawnWaitGroup.Done()
		})
	}

	if openDb {
		var err error
		tmpDir, err := ioutil.TempDir("", "DAGSetup")
		if err != nil {
			return nil, nil, errors.Errorf("error creating temp dir: %s", err)
	if config.DB == nil {
		// Create the root directory for test databases.
		if !FileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := errors.Errorf("unable to create test db "+
					"root: %s", err)
				return nil, nil, err
			}
		}

		// We set ldb.Options here to return nil because normally
		// the database is initialized with very large caches that
		// can make opening/closing the database for every test
		// quite heavy.
		originalLDBOptions := ldb.Options
		ldb.Options = func() *opt.Options {
			return nil
		}

		dbPath := filepath.Join(tmpDir, dbName)
		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		databaseContext, err := dbaccess.New(dbPath)
		var err error
		config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, errors.Errorf("error creating db: %s", err)
		}

		config.DatabaseContext = databaseContext

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			databaseContext.Close()
			ldb.Options = originalLDBOptions
			config.DB.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	} else {
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
		}
	}

	config.TimeSource = NewTimeSource()
	config.TimeSource = NewMedianTime()
	config.SigCache = txscript.NewSigCache(1000)

	// Create the DAG instance.
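A note on the `spawn` override above: it temporarily replaces the package-level spawn function with a wrapper that counts goroutines in a `sync.WaitGroup`, so the returned teardown can block until all of them exit before the database is closed. The following is a standalone sketch of that trick (not kaspad's actual code); using `defer` for `Done` is a small defensive variation on the original, since it fires even if `f` panics.

```go
package main

import (
	"fmt"
	"sync"
)

// spawn stands in for the package-level goroutine launcher being wrapped.
var spawn = func(f func()) { go f() }

func main() {
	var wg sync.WaitGroup
	realSpawn := spawn
	spawn = func(f func()) {
		wg.Add(1)
		realSpawn(func() {
			defer wg.Done() // mark completion even if f panics
			f()
		})
	}

	spawn(func() { fmt.Println("worker done") })

	// Teardown: block until every spawned goroutine finishes, then restore.
	wg.Wait()
	spawn = realSpawn
}
```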
@@ -121,30 +135,30 @@ type txSubnetworkData struct {
	Payload []byte
}

func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *domainmessage.MsgTx {
	txIns := []*domainmessage.TxIn{}
	txOuts := []*domainmessage.TxOut{}
func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for i := uint32(0); i < numInputs; i++ {
		txIns = append(txIns, &domainmessage.TxIn{
			PreviousOutpoint: *domainmessage.NewOutpoint(&daghash.TxID{}, i),
		txIns = append(txIns, &wire.TxIn{
			PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
			SignatureScript:  []byte{},
			Sequence:         domainmessage.MaxTxInSequenceNum,
			Sequence:         wire.MaxTxInSequenceNum,
		})
	}

	for i := uint32(0); i < numOutputs; i++ {
		txOuts = append(txOuts, &domainmessage.TxOut{
		txOuts = append(txOuts, &wire.TxOut{
			ScriptPubKey: OpTrueScript,
			Value:        outputValue,
		})
	}

	if subnetworkData != nil {
		return domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
		return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
	}

	return domainmessage.NewNativeMsgTx(domainmessage.TxVersion, txIns, txOuts)
	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}
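As a usage sketch, a hypothetical in-package test could exercise the helper above like this; the `TxIn`/`TxOut` field names are assumed from the btcd-style message types this code derives from, and the transaction is structurally valid but unspendable since every input references the zero TxID.

```go
// Hypothetical in-package test for the helper above.
func TestCreateTxForTestShape(t *testing.T) {
	tx := createTxForTest(2, 3, 1000, nil) // 2 inputs, 3 outputs of 1000, native subnetwork
	if len(tx.TxIn) != 2 || len(tx.TxOut) != 3 {
		t.Fatalf("unexpected shape: %d inputs, %d outputs", len(tx.TxIn), len(tx.TxOut))
	}
	// Inputs reference the zero TxID, so the result is useful only for
	// serialization and validation plumbing, not for spending.
}
```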
// VirtualForTest is an exported version for virtualBlock, so that it can be returned by exported test_util methods
@@ -159,17 +173,17 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {

// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
	parents := newBlockSet()
	parents := newSet()
	for _, hash := range parentHashes {
		parent, ok := dag.index.LookupNode(hash)
		if !ok {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
		}
		parents.add(parent)
	}
	virtual := newVirtualBlock(dag, parents)

	pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
	pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
	if err != nil {
		return nil, err
	}
@@ -186,7 +200,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
// LoadBlocks reads files containing kaspa gzipped block data from disk
// and returns them as an array of util.Block.
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
	var network = domainmessage.Mainnet
	var network = wire.Mainnet
	var dr io.Reader
	var fi io.ReadCloser

@@ -244,7 +258,7 @@ func opTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
}

// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*domainmessage.MsgTx) (*domainmessage.MsgBlock, error) {
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) (*wire.MsgBlock, error) {
	newVirtual, err := GetVirtualFromParentsForTest(dag, parentHashes)
	if err != nil {
		return nil, err
@@ -252,7 +266,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
	oldVirtual := SetVirtualForTest(dag, newVirtual)
	defer SetVirtualForTest(dag, oldVirtual)

	OpTrueAddr, err := opTrueAddress(dag.Params.Prefix)
	OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix)
	if err != nil {
		return nil, err
	}
@@ -274,17 +288,6 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
		blockTransactions[i+1] = util.NewTx(tx)
	}

	// Sort transactions by subnetwork ID
	sort.Slice(blockTransactions, func(i, j int) bool {
		if blockTransactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
			return true
		}
		if blockTransactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
			return false
		}
		return subnetworkid.Less(&blockTransactions[i].MsgTx().SubnetworkID, &blockTransactions[j].MsgTx().SubnetworkID)
	})

	block, err := dag.BlockForMining(blockTransactions)
	if err != nil {
		return nil, err
@@ -295,28 +298,6 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
	return block, nil
}

// PrepareAndProcessBlockForTest prepares a block that points to the given parent
// hashes and process it.
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*domainmessage.MsgTx) *domainmessage.MsgBlock {
	daghash.Sort(parentHashes)
	block, err := PrepareBlockForTest(dag, parentHashes, transactions)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}
	utilBlock := util.NewBlock(block)
	isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
	if err != nil {
		t.Fatalf("unexpected error in ProcessBlock: %s", err)
	}
	if isDelayed {
		t.Fatalf("block is too far in the future")
	}
	if isOrphan {
		t.Fatalf("block was unexpectedly orphan")
	}
	return block
}
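A hypothetical usage sketch for the helper above, assuming a DAG produced by `DAGSetup`: because the function fails the test itself on orphaned or delayed blocks, growing a small chain needs no extra error handling at the call site.

```go
// Hypothetical in-package helper building on PrepareAndProcessBlockForTest.
func buildSmallChainForTest(t *testing.T, dag *BlockDAG) {
	for i := 0; i < 3; i++ {
		// Each iteration mines a block on the current tips; the helper
		// itself fails the test on orphan or delayed blocks.
		PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
	}
}
```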
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
	extraNonceForTest++
blockdag/test_utils_test.go (new file, +14 lines)
@@ -0,0 +1,14 @@
package blockdag

import (
	"testing"
)

func TestIsSupportedDbType(t *testing.T) {
	if !isSupportedDbType("ffldb") {
		t.Errorf("ffldb should be a supported DB driver")
	}
	if isSupportedDbType("madeUpDb") {
		t.Errorf("madeUpDb should not be a supported DB driver")
	}
}
BIN blockdag/testdata/blk_0_to_4.dat (vendored): binary file not shown
BIN blockdag/testdata/blk_3A.dat (vendored): binary file not shown
BIN blockdag/testdata/blk_3B.dat (vendored): binary file not shown
BIN blockdag/testdata/blk_3C.dat (vendored): binary file not shown
BIN blockdag/testdata/blk_3D.dat (vendored): binary file not shown
@@ -8,7 +8,6 @@ import (
	"fmt"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/pkg/errors"
)

// ThresholdState define the various threshold states used when voting on
@@ -157,7 +156,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit

		// The state is simply defined if the start time hasn't been
		// been reached yet.
		if uint64(medianTime.UnixMilliseconds()) < checker.BeginTime() {
		if uint64(medianTime.Unix()) < checker.BeginTime() {
			cache.Update(prevNode.hash, ThresholdDefined)
			break
		}
@@ -178,9 +177,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
			var ok bool
			state, ok = cache.Lookup(prevNode.hash)
			if !ok {
				return ThresholdFailed, errors.Errorf(
				return ThresholdFailed, AssertError(fmt.Sprintf(
					"thresholdState: cache lookup failed for %s",
					prevNode.hash)
					prevNode.hash))
			}
		}

@@ -194,7 +193,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime(dag)
			medianTimeUnix := uint64(medianTime.UnixMilliseconds())
			medianTimeUnix := uint64(medianTime.Unix())
			if medianTimeUnix >= checker.EndTime() {
				state = ThresholdFailed
				break
@@ -211,7 +210,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime(dag)
			if uint64(medianTime.UnixMilliseconds()) >= checker.EndTime() {
			if uint64(medianTime.Unix()) >= checker.EndTime() {
				state = ThresholdFailed
				break
			}
@@ -231,6 +230,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
				if condition {
					count++
				}

				// Get the previous block node.
				current = current.selectedParent
			}

			// The state is locked in if the number of blocks in the
@@ -297,11 +299,11 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
	if deploymentID > uint32(len(dag.Params.Deployments)) {
		return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
	if deploymentID > uint32(len(dag.dagParams.Deployments)) {
		return ThresholdFailed, DeploymentError(deploymentID)
	}

	deployment := &dag.Params.Deployments[deploymentID]
	deployment := &dag.dagParams.Deployments[deploymentID]
	checker := deploymentChecker{deployment: deployment, dag: dag}
	cache := &dag.deploymentCaches[deploymentID]

@@ -325,8 +327,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
			return err
		}
	}
	for id := 0; id < len(dag.Params.Deployments); id++ {
		deployment := &dag.Params.Deployments[id]
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, dag: dag}
		_, err := dag.thresholdState(prevNode, checker, cache)
@@ -336,8 +338,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
	}

	// No warnings about unknown rules or versions until the DAG is
	// synced.
	if dag.isSynced() {
	// current.
	if dag.isCurrent() {
		// Warn if a high enough percentage of the last blocks have
		// unexpected versions.
		bestNode := dag.selectedTip()
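The voting loop above walks back along selected parents, counting blocks whose version signals the deployment, and caches the resulting state per window boundary. A simplified, standalone sketch of that walk (the `node` type here is a toy stand-in, not kaspad's `blockNode`):

```go
package main

import "fmt"

// node is a toy stand-in for a block node with a selected-parent link.
type node struct {
	selectedParent *node
	votes          bool // whether this block signals the deployment
}

// countVotes steps back along selected parents over a fixed window,
// counting condition votes, mirroring the loop in thresholdState.
func countVotes(tip *node, windowSize int) int {
	count := 0
	current := tip
	for i := 0; i < windowSize && current != nil; i++ {
		if current.votes {
			count++
		}
		// Get the previous block node.
		current = current.selectedParent
	}
	return count
}

func main() {
	genesis := &node{votes: true}
	tip := &node{selectedParent: genesis, votes: false}
	fmt.Println(countVotes(tip, 10)) // 1
}
```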
@@ -1,25 +0,0 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/util/mstime"
)

// TimeSource is the interface to access time.
type TimeSource interface {
	// Now returns the current time.
	Now() mstime.Time
}

// timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time.
type timeSource struct{}

// Now returns the current local time, with one millisecond precision.
func (m *timeSource) Now() mstime.Time {
	return mstime.Now()
}

// NewTimeSource returns a new instance of a TimeSource
func NewTimeSource() TimeSource {
	return &timeSource{}
}
@@ -2,26 +2,31 @@ package blockdag

import (
	"bytes"
	"github.com/kaspanet/go-secp256k1"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/golang/groupcache/lru"
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *domainmessage.Outpoint) (*secp256k1.MultiSet, error) {
	w := &bytes.Buffer{}
	err := serializeUTXO(w, entry, outpoint)
	if err != nil {
		return nil, err
	}
	ms.Add(w.Bytes())
	return ms, nil
}
const ecmhCacheSize = 4_000_000

func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *domainmessage.Outpoint) (*secp256k1.MultiSet, error) {
var (
	utxoToECMHCache = lru.New(ecmhCacheSize)
)

func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
	w := &bytes.Buffer{}
	err := serializeUTXO(w, entry, outpoint)
	if err != nil {
		return nil, err
	}
	ms.Remove(w.Bytes())
	return ms, nil
	serializedUTXO := w.Bytes()
	utxoHash := daghash.DoubleHashH(serializedUTXO)

	if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
		return cachedMSPoint.(*ecc.Multiset), nil
	}
	msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
	utxoToECMHCache.Add(utxoHash, msPoint)
	return msPoint, nil
}
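The rewrite above swaps incremental multiset add/remove for a memoized per-UTXO point computation: the multiset point for a serialized UTXO is deterministic, so it can be cached by hash in a bounded LRU. A minimal standalone sketch of that caching pattern, with `sha256` standing in for the EC multiset math; the `lru` calls shown match `github.com/golang/groupcache/lru` as used above:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/golang/groupcache/lru"
)

// cache memoizes an expensive, deterministic computation keyed by the
// hash of its input, with a bound on the number of retained entries.
var cache = lru.New(1000)

// expensivelyDerive is a placeholder for the multiset-point computation.
func expensivelyDerive(serialized []byte) [32]byte {
	return sha256.Sum256(serialized)
}

func derivedFor(serialized []byte) [32]byte {
	key := sha256.Sum256(serialized) // [32]byte is comparable, so usable as a key
	if v, ok := cache.Get(key); ok {
		return v.([32]byte) // cache hit: skip the expensive math
	}
	result := expensivelyDerive(serialized)
	cache.Add(key, result)
	return result
}

func main() { fmt.Printf("%x\n", derivedFor([]byte("utxo"))) }
```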
@@ -2,12 +2,14 @@ package blockdag

import (
	"bytes"

	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/util/locks"
	"github.com/pkg/errors"
)

var multisetPointSize = 32

type blockUTXODiffData struct {
	diff      *UTXODiff
	diffChild *blockNode
@@ -15,16 +17,16 @@ type blockUTXODiffData struct {

type utxoDiffStore struct {
	dag    *BlockDAG
	dirty  map[*blockNode]struct{}
	loaded map[*blockNode]*blockUTXODiffData
	dirty  map[daghash.Hash]struct{}
	loaded map[daghash.Hash]*blockUTXODiffData
	mtx    *locks.PriorityMutex
}

func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
	return &utxoDiffStore{
		dag:    dag,
		dirty:  make(map[*blockNode]struct{}),
		loaded: make(map[*blockNode]*blockUTXODiffData),
		dirty:  make(map[daghash.Hash]struct{}),
		loaded: make(map[daghash.Hash]*blockUTXODiffData),
		mtx:    locks.NewPriorityMutex(),
	}
}
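Note the change of map keys from `*blockNode` to `daghash.Hash` values: keying by a comparable fixed-size value means two distinct node instances describing the same block hit the same entry, and entries survive node reconstruction. A toy sketch of the idea:

```go
package main

import "fmt"

// hash is a toy stand-in for a fixed-size, comparable block hash.
type hash [32]byte

type store struct {
	dirty  map[hash]struct{}
	loaded map[hash]string
}

func (s *store) set(h *hash, v string) {
	s.loaded[*h] = v         // dereference: the map copies the 32-byte value
	s.dirty[*h] = struct{}{} // mark for the next flush
}

func main() {
	s := &store{dirty: make(map[hash]struct{}), loaded: make(map[hash]string)}
	h := hash{1}
	s.set(&h, "diff data")
	fmt.Println(len(s.loaded), len(s.dirty)) // 1 1
}
```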
@@ -33,15 +35,16 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
	diffStore.mtx.HighPriorityWriteLock()
	defer diffStore.mtx.HighPriorityWriteUnlock()
	// load the diff data from DB to diffStore.loaded
	_, err := diffStore.diffDataByBlockNode(node)
	if dbaccess.IsNotFoundError(err) {
		diffStore.loaded[node] = &blockUTXODiffData{}
	} else if err != nil {
	_, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return err
	}
	if !exists {
		diffStore.loaded[*node.hash] = &blockUTXODiffData{}
	}

	diffStore.loaded[node].diff = diff
	diffStore.setBlockAsDirty(node)
	diffStore.loaded[*node.hash].diff = diff
	diffStore.setBlockAsDirty(node.hash)
	return nil
}

@@ -49,19 +52,22 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
	diffStore.mtx.HighPriorityWriteLock()
	defer diffStore.mtx.HighPriorityWriteUnlock()
	// load the diff data from DB to diffStore.loaded
	_, err := diffStore.diffDataByBlockNode(node)
	_, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return err
	}
	if !exists {
		return diffNotFoundError(node)
	}

	diffStore.loaded[node].diffChild = diffChild
	diffStore.setBlockAsDirty(node)
	diffStore.loaded[*node.hash].diffChild = diffChild
	diffStore.setBlockAsDirty(node.hash)
	return nil
}

func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error {
	for _, node := range nodes {
		err := diffStore.removeBlockDiffData(dbContext, node)
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
	for _, hash := range blockHashes {
		err := diffStore.removeBlockDiffData(dbTx, hash)
		if err != nil {
			return err
		}
@@ -69,64 +75,87 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context,
	return nil
}

func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error {
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
	diffStore.mtx.LowPriorityWriteLock()
	defer diffStore.mtx.LowPriorityWriteUnlock()
	delete(diffStore.loaded, node)
	err := dbaccess.RemoveDiffData(dbContext, node.hash)
	delete(diffStore.loaded, *blockHash)
	err := dbRemoveDiffData(dbTx, blockHash)
	if err != nil {
		return err
	}
	return nil
}

func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) {
	diffStore.dirty[node] = struct{}{}
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
	diffStore.dirty[*blockHash] = struct{}{}
}

func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) {
	if diffData, ok := diffStore.loaded[node]; ok {
		return diffData, nil
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
	if diffData, ok := diffStore.loaded[*hash]; ok {
		return diffData, true, nil
	}
	diffData, err := diffStore.diffDataFromDB(node.hash)
	diffData, err := diffStore.diffDataFromDB(hash)
	if err != nil {
		return nil, err
		return nil, false, err
	}
	diffStore.loaded[node] = diffData
	return diffData, nil
	exists := diffData != nil
	if exists {
		diffStore.loaded[*hash] = diffData
	}
	return diffData, exists, nil
}

func diffNotFoundError(node *blockNode) error {
	return errors.Errorf("Couldn't find diff data for block %s", node.hash)
}

func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
	diffStore.mtx.HighPriorityReadLock()
	defer diffStore.mtx.HighPriorityReadUnlock()
	diffData, err := diffStore.diffDataByBlockNode(node)
	diffData, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, diffNotFoundError(node)
	}
	return diffData.diff, nil
}

func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
	diffStore.mtx.HighPriorityReadLock()
	defer diffStore.mtx.HighPriorityReadUnlock()
	diffData, err := diffStore.diffDataByBlockNode(node)
	diffData, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, diffNotFoundError(node)
	}
	return diffData.diffChild, nil
}

func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
	serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(diffStore.dag.databaseContext, hash)
	var diffData *blockUTXODiffData
	err := diffStore.dag.db.View(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
		serializedBlockDiffData := bucket.Get(hash[:])
		if serializedBlockDiffData != nil {
			var err error
			diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
	return diffData, nil
}
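The accessors above follow one pattern: consult the `loaded` map, fall back to the database, and report absence as a boolean (`exists`) rather than an error, caching whatever was found. A minimal sketch of that lookup shape, with a map standing in for the database:

```go
package main

import "fmt"

// diffStore is a toy version of the cache-then-DB lookup above.
type diffStore struct {
	loaded map[[32]byte][]byte
	db     map[[32]byte][]byte // stands in for the real database
}

// dataByHash checks the in-memory cache first and falls back to the
// database; absence is a boolean, not an error.
func (s *diffStore) dataByHash(h [32]byte) ([]byte, bool) {
	if v, ok := s.loaded[h]; ok {
		return v, true
	}
	v, ok := s.db[h]
	if ok {
		s.loaded[h] = v // cache for subsequent lookups
	}
	return v, ok
}

func main() {
	s := &diffStore{
		loaded: map[[32]byte][]byte{},
		db:     map[[32]byte][]byte{{1}: []byte("x")},
	}
	_, found := s.dataByHash([32]byte{1})
	fmt.Println(found) // true, and the value is now cached in loaded
}
```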
// flushToDB writes all dirty diff data to the database.
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
// flushToDB writes all dirty diff data to the database. If all writes
// succeed, this clears the dirty set.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
	diffStore.mtx.HighPriorityWriteLock()
	defer diffStore.mtx.HighPriorityWriteUnlock()
	if len(diffStore.dirty) == 0 {
@@ -136,10 +165,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
	// Allocate a buffer here to avoid needless allocations/grows
	// while writing each entry.
	buffer := &bytes.Buffer{}
	for node := range diffStore.dirty {
	for hash := range diffStore.dirty {
		buffer.Reset()
		diffData := diffStore.loaded[node]
		err := storeDiffData(dbContext, buffer, node.hash, diffData)
		diffData := diffStore.loaded[hash]
		err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
		if err != nil {
			return err
		}
@@ -148,53 +177,31 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
}

func (diffStore *utxoDiffStore) clearDirtyEntries() {
	diffStore.dirty = make(map[*blockNode]struct{})
	diffStore.dirty = make(map[daghash.Hash]struct{})
}

// maxBlueScoreDifferenceToKeepLoaded is the maximum difference
// between the virtual's blueScore and a blockNode's blueScore
// under which to keep diff data loaded in memory.
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100

// clearOldEntries removes entries whose blue score is lower than
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note
// that tips are not removed either even if their blue score is
// lower than the above.
func (diffStore *utxoDiffStore) clearOldEntries() {
	diffStore.mtx.HighPriorityWriteLock()
	defer diffStore.mtx.HighPriorityWriteUnlock()

	virtualBlueScore := diffStore.dag.VirtualBlueScore()
	minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore {
		minBlueScore = 0
	}

	tips := diffStore.dag.virtual.tips()

	toRemove := make(map[*blockNode]struct{})
	for node := range diffStore.loaded {
		if node.blueScore < minBlueScore && !tips.contains(node) {
			toRemove[node] = struct{}{}
		}
	}
	for node := range toRemove {
		delete(diffStore.loaded, node)
	}
}
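`clearOldEntries` bounds memory by blue score while never evicting tips, since tips are the entries most likely to be needed next. A standalone sketch of the same policy with toy types; note the underflow guard for when the virtual's blue score is still below the threshold:

```go
package main

import "fmt"

type node struct{ blueScore uint64 }

// clearOld drops loaded entries whose blue score falls more than maxDiff
// behind the virtual, but never evicts tips.
func clearOld(loaded, tips map[*node]struct{}, virtualBlueScore, maxDiff uint64) {
	minScore := uint64(0)
	if virtualBlueScore > maxDiff {
		minScore = virtualBlueScore - maxDiff // guard against uint underflow
	}
	for n := range loaded {
		_, isTip := tips[n]
		if n.blueScore < minScore && !isTip {
			delete(loaded, n) // deleting while ranging is allowed in Go
		}
	}
}

func main() {
	old := &node{blueScore: 1}
	tip := &node{blueScore: 200}
	loaded := map[*node]struct{}{old: {}, tip: {}}
	clearOld(loaded, map[*node]struct{}{tip: {}}, 200, 100)
	fmt.Println(len(loaded)) // 1: only the tip remains
}
```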
// storeDiffData stores the UTXO diff data to the database.
// dbStoreDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if there exists one.
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
	// To avoid a ton of allocs, use the io.Writer
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
	// To avoid a ton of allocs, use the given writeBuffer
	// instead of allocating one. We expect the buffer to
	// already be initialized and, in most cases, to already
	// already be initalized and, in most cases, to already
	// be large enough to accommodate the serialized data
	// without growing.
	err := serializeBlockUTXODiffData(w, diffData)
	err := serializeBlockUTXODiffData(writeBuffer, diffData)
	if err != nil {
		return err
	}

	return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
	// Bucket.Put doesn't copy on its own, so we manually
	// copy here. We do so because we expect the buffer
	// to be reused once we're done with it.
	serializedDiffData := make([]byte, writeBuffer.Len())
	copy(serializedDiffData, writeBuffer.Bytes())

	return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}

func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
	return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
}
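The comment above calls out a classic pitfall: `Bucket.Put` retains the slice it is given, and `writeBuffer` is reused across iterations of the flush loop, so the bytes must be copied first. A standalone illustration, with a map standing in for the bucket:

```go
package main

import (
	"bytes"
	"fmt"
)

// putCopy snapshots the buffer before handing its contents to a store
// that retains slices, so a later Reset/Write can't mutate stored data.
func putCopy(bucket map[string][]byte, key string, buf *bytes.Buffer) {
	value := make([]byte, buf.Len())
	copy(value, buf.Bytes()) // snapshot the buffer's current contents
	bucket[key] = value      // safe: a later buf.Reset() won't touch value
}

func main() {
	bucket := map[string][]byte{}
	buf := &bytes.Buffer{}
	buf.WriteString("diff-1")
	putCopy(bucket, "a", buf)
	buf.Reset()
	buf.WriteString("diff-2")
	fmt.Println(string(bucket["a"])) // still "diff-1"
}
```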
@@ -1,18 +1,18 @@
package blockdag

import (
	"fmt"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"reflect"
	"testing"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/util/daghash"
)

func TestUTXODiffStore(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
	dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
@@ -31,19 +31,16 @@ func TestUTXODiffStore(t *testing.T) {
	// Check that an error is returned when asking for non existing node
	nonExistingNode := createNode()
	_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
	if !dbaccess.IsNotFoundError(err) {
		if err != nil {
			t.Errorf("diffByNode: %s", err)
		} else {
			t.Errorf("diffByNode: unexpectedly found diff data")
		}
	expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
	if err == nil || err.Error() != expectedErrString {
		t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
	}

	// Add node's diff data to the utxoDiffStore and check if it's checked correctly.
	node := createNode()
	diff := NewUTXODiff()
	diff.toAdd.add(domainmessage.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}})
	diff.toRemove.add(domainmessage.Outpoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, scriptPubKey: []byte{0x02}})
	diff.toAdd.add(wire.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}})
	diff.toRemove.add(wire.Outpoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, scriptPubKey: []byte{0x02}})
	if err := dag.utxoDiffStore.setBlockDiff(node, diff); err != nil {
		t.Fatalf("setBlockDiff: unexpected error: %s", err)
	}
@@ -66,20 +63,13 @@ func TestUTXODiffStore(t *testing.T) {

	// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
	// map, and check if the diff data is re-fetched from the database.
	dbTx, err := dag.databaseContext.NewTx()
	if err != nil {
		t.Fatalf("Failed to open database transaction: %s", err)
	}
	defer dbTx.RollbackUnlessClosed()
	err = dag.utxoDiffStore.flushToDB(dbTx)
	err = dag.db.Update(func(dbTx database.Tx) error {
		return dag.utxoDiffStore.flushToDB(dbTx)
	})
	if err != nil {
		t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
	}
	err = dbTx.Commit()
	if err != nil {
		t.Fatalf("Failed to commit database transaction: %s", err)
	}
	delete(dag.utxoDiffStore.loaded, node)
	delete(dag.utxoDiffStore.loaded, *node.hash)

	if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
		t.Fatalf("diffByNode: unexpected error: %s", err)
@@ -88,80 +78,9 @@ func TestUTXODiffStore(t *testing.T) {
	}

	// Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded
	if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok {
	if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
		t.Errorf("the diff data wasn't added to loaded map after requesting it")
	} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
		t.Errorf("Expected diff and loadedDiff to be equal")
	}
}

func TestClearOldEntries(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	// Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run
	currentDifference := maxBlueScoreDifferenceToKeepLoaded
	maxBlueScoreDifferenceToKeepLoaded = 10
	defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()

	// Add 10 blocks
	blockNodes := make([]*blockNode, 10)
	for i := 0; i < 10; i++ {
		processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)

		node, ok := dag.index.LookupNode(processedBlock.BlockHash())
		if !ok {
			t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
		}
		blockNodes[i] = node
	}

	// Make sure that all of them exist in the loaded set
	for _, node := range blockNodes {
		_, ok := dag.utxoDiffStore.loaded[node]
		if !ok {
			t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
		}
	}

	// Add 10 more blocks on top of the others
	for i := 0; i < 10; i++ {
		PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
	}

	// Make sure that all the old nodes no longer exist in the loaded set
	for _, node := range blockNodes {
		_, ok := dag.utxoDiffStore.loaded[node]
		if ok {
			t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
		}
	}

	// Add a block on top of the genesis to force the retrieval of all diffData
	processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
	node, ok := dag.index.LookupNode(processedBlock.BlockHash())
	if !ok {
		t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
	}

	// Make sure that the child-of-genesis node is in the loaded set, since it
	// is a tip.
	_, ok = dag.utxoDiffStore.loaded[node]
	if !ok {
		t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
	}

	// Make sure that all the old nodes still do not exist in the loaded set
	for _, node := range blockNodes {
		_, ok := dag.utxoDiffStore.loaded[node]
		if ok {
			t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
		}
	}
}
@@ -2,11 +2,15 @@ package blockdag

import (
	"bytes"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/util/binaryserializer"
	"github.com/kaspanet/kaspad/util/daghash"
	"encoding/binary"
	"fmt"
	"github.com/pkg/errors"
	"io"
	"math/big"

	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

// serializeBlockUTXODiffData serializes diff data in the following format:
@@ -17,12 +21,12 @@ import (
// diff      | UTXODiff | The diff data's diff
func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error {
	hasDiffChild := diffData.diffChild != nil
	err := domainmessage.WriteElement(w, hasDiffChild)
	err := wire.WriteElement(w, hasDiffChild)
	if err != nil {
		return err
	}
	if hasDiffChild {
		err := domainmessage.WriteElement(w, diffData.diffChild.hash)
		err := wire.WriteElement(w, diffData.diffChild.hash)
		if err != nil {
			return err
		}
@@ -36,31 +40,54 @@ func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error
	return nil
}

func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
	// As described in the serialization format comments, the header code
	// encodes the blue score shifted over one bit and the block reward flag
	// in the lowest bit.
	headerCode := uint64(entry.BlockBlueScore()) << 1
	if entry.IsCoinbase() {
		headerCode |= 0x01
	}

	return headerCode
}
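A worked example of `utxoEntryHeaderCode`: a coinbase entry at blue score 1000 encodes as `(1000 << 1) | 1 = 2001`, and decoding simply reverses the shift and mask:

```go
package main

import "fmt"

func main() {
	// Encode: blue score 1000, coinbase flag set in the lowest bit.
	code := uint64(1000)<<1 | 1
	fmt.Println(code) // 2001

	// Decode: mask off the flag, shift back for the blue score.
	fmt.Println(code&0x01 != 0) // true -> coinbase
	fmt.Println(code >> 1)      // 1000 -> blue score
}
```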
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
	diffData := &blockUTXODiffData{}
	r := bytes.NewBuffer(serializedDiffData)
	serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)

	var hasDiffChild bool
	err := domainmessage.ReadElement(r, &hasDiffChild)
	err := wire.ReadElement(serializedDiffData, &hasDiffChild)
	if err != nil {
		return nil, err
	}

	if hasDiffChild {
		hash := &daghash.Hash{}
		err := domainmessage.ReadElement(r, hash)
		err := wire.ReadElement(serializedDiffData, hash)
		if err != nil {
			return nil, err
		}

		var ok bool
		diffData.diffChild, ok = diffStore.dag.index.LookupNode(hash)
		if !ok {
			return nil, errors.Errorf("block %s does not exist in the DAG", hash)
		}
		diffData.diffChild = diffStore.dag.index.LookupNode(hash)
	}

	diffData.diff, err = deserializeUTXODiff(r)
	diffData.diff = &UTXODiff{
		useMultiset: true,
	}

	diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
	if err != nil {
		return nil, err
	}

	diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
	if err != nil {
		return nil, err
	}

	diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
	if err != nil {
		return nil, err
	}
@@ -68,31 +95,38 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData
	return diffData, nil
}

func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
	diff := &UTXODiff{}

	var err error
	diff.toAdd, err = deserializeUTXOCollection(r)
	if err != nil {
		return nil, err
	}

	diff.toRemove, err = deserializeUTXOCollection(r)
	if err != nil {
		return nil, err
	}

	return diff, nil
}

func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
	count, err := domainmessage.ReadVarInt(r)
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
	count, err := wire.ReadVarInt(r)
	if err != nil {
		return nil, err
	}
	collection := utxoCollection{}
	for i := uint64(0); i < count; i++ {
		utxoEntry, outpoint, err := deserializeUTXO(r)
		outpointSize, err := wire.ReadVarInt(r)
		if err != nil {
			return nil, err
		}

		serializedOutpoint := make([]byte, outpointSize)
		err = binary.Read(r, byteOrder, serializedOutpoint)
		if err != nil {
			return nil, err
		}
		outpoint, err := deserializeOutpoint(serializedOutpoint)
		if err != nil {
			return nil, err
		}

		utxoEntrySize, err := wire.ReadVarInt(r)
		if err != nil {
			return nil, err
		}
		serializedEntry := make([]byte, utxoEntrySize)
		err = binary.Read(r, byteOrder, serializedEntry)
		if err != nil {
			return nil, err
		}
		utxoEntry, err := deserializeUTXOEntry(serializedEntry)
		if err != nil {
			return nil, err
		}
@@ -101,22 +135,31 @@ func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
	return collection, nil
}

func deserializeUTXO(r io.Reader) (*UTXOEntry, *domainmessage.Outpoint, error) {
	outpoint, err := deserializeOutpoint(r)
// deserializeMultiset deserializes an EMCH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
	xBytes := make([]byte, multisetPointSize)
	yBytes := make([]byte, multisetPointSize)
	err := binary.Read(r, byteOrder, xBytes)
	if err != nil {
		return nil, nil, err
		return nil, err
	}

	utxoEntry, err := deserializeUTXOEntry(r)
	err = binary.Read(r, byteOrder, yBytes)
	if err != nil {
		return nil, nil, err
		return nil, err
	}
	return utxoEntry, outpoint, nil
	var x, y big.Int
	x.SetBytes(xBytes)
	y.SetBytes(yBytes)
	return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
}

// serializeUTXODiff serializes UTXODiff by serializing
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
	if !diff.useMultiset {
		return errors.New("Cannot serialize a UTXO diff without a multiset")
	}
	err := serializeUTXOCollection(w, diff.toAdd)
	if err != nil {
		return err
@@ -126,7 +169,10 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
	if err != nil {
		return err
	}

	err = serializeMultiset(w, diff.diffMultiset)
	if err != nil {
		return err
	}
	return nil
}

@@ -134,7 +180,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
// the utxo entries and serializing them and their corresponding outpoint
// prefixed by a varint that indicates their size.
func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
	err := domainmessage.WriteVarInt(w, uint64(len(collection)))
	err := wire.WriteVarInt(w, uint64(len(collection)))
	if err != nil {
		return err
	}
@@ -147,93 +193,120 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
	return nil
}

// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinnates of the multiset point and
// padding each one of them with 32 byte (it'll be 32 byte in most
// cases anyway except one of the coordinates is zero) and writing
// them one after the other.
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
	x, y := ms.Point()
	xBytes := make([]byte, multisetPointSize)
	copy(xBytes, x.Bytes())
	yBytes := make([]byte, multisetPointSize)
	copy(yBytes, y.Bytes())

	err := binary.Write(w, byteOrder, xBytes)
	if err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, yBytes)
	if err != nil {
		return err
	}
	return nil
}
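One subtlety in `serializeMultiset` above: `big.Int.Bytes()` drops leading zeros, and `copy(xBytes, x.Bytes())` left-aligns the result, which round-trips only because curve coordinates are almost always exactly 32 bytes (the comment notes the zero-coordinate exception). Below is a sketch of a defensive fixed-width variant using `FillBytes` (Go 1.15+), which right-aligns with zero padding for any value; this is an alternative encoding, not the format used above:

```go
package main

import (
	"fmt"
	"math/big"
)

// encodePoint writes each coordinate into a fixed 32-byte slot,
// big-endian and zero-padded on the left, so any value round-trips.
func encodePoint(x, y *big.Int) []byte {
	out := make([]byte, 64)
	x.FillBytes(out[:32])
	y.FillBytes(out[32:])
	return out
}

func decodePoint(b []byte) (x, y *big.Int) {
	return new(big.Int).SetBytes(b[:32]), new(big.Int).SetBytes(b[32:])
}

func main() {
	x, y := big.NewInt(7), big.NewInt(11)
	dx, dy := decodePoint(encodePoint(x, y))
	fmt.Println(dx, dy) // 7 11
}
```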
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *domainmessage.Outpoint) error {
	err := serializeOutpoint(w, outpoint)
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
	serializedOutpoint := *outpointKey(*outpoint)
	err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
	if err != nil {
		return err
	}

	err = serializeUTXOEntry(w, entry)
	err = binary.Write(w, byteOrder, serializedOutpoint)
	if err != nil {
		return err
	}

	serializedUTXOEntry := serializeUTXOEntry(entry)
	err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
	if err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, serializedUTXOEntry)
	if err != nil {
		return err
	}
	return nil
}

// p2pkhUTXOEntrySerializeSize is the serialized size for a P2PKH UTXO entry.
// 8 bytes (header code) + 8 bytes (amount) + varint for script pub key length of 25 (for P2PKH) + 25 bytes for P2PKH script.
var p2pkhUTXOEntrySerializeSize = 8 + 8 + domainmessage.VarIntSerializeSize(25) + 25
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
	// Encode the header code.
	headerCode := utxoEntryHeaderCode(entry)

// serializeUTXOEntry encodes the entry to the given io.Writer and use compression if useCompression is true.
// The compression format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
	// Encode the blueScore.
	err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
	if err != nil {
		return err
	}
	// Calculate the size needed to serialize the entry.
	size := serializeSizeVLQ(headerCode) +
		compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())

	// Encode the packedFlags.
	err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
	if err != nil {
		return err
	}
	// Serialize the header code followed by the compressed unspent
	// transaction output.
	serialized := make([]byte, size)
	offset := putVLQ(serialized, headerCode)
	offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
		entry.ScriptPubKey())

	err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
	if err != nil {
		return err
	}

	err = domainmessage.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
	if err != nil {
		return err
	}

	_, err = w.Write(entry.ScriptPubKey())
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
	return serialized
}

// deserializeUTXOEntry decodes a UTXO entry from the passed reader
// into a new UTXOEntry. If isCompressed is used it will decompress
// the entry according to the format that is described in detail
// above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
	// Deserialize the blueScore.
	blockBlueScore, err := binaryserializer.Uint64(r, byteOrder)
	if err != nil {
		return nil, err
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. this format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
	if len(serialized) <= daghash.HashSize {
		return nil, errDeserialize("unexpected end of data")
	}

	// Decode the packedFlags.
	packedFlags, err := binaryserializer.Uint8(r)
	txID := daghash.TxID{}
	txID.SetBytes(serialized[:daghash.HashSize])
	index, _ := deserializeVLQ(serialized[daghash.HashSize:])
	return wire.NewOutpoint(&txID, uint32(index)), nil
}

// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
	// Deserialize the header code.
	code, offset := deserializeVLQ(serialized)
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after header")
	}

	// Decode the header code.
	//
	// Bit 0 indicates whether the containing transaction is a coinbase.
	// Bits 1-x encode blue score of the containing transaction.
	isCoinbase := code&0x01 != 0
	blockBlueScore := code >> 1

	// Decode the compressed unspent transaction output.
	amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
	if err != nil {
		return nil, err
		return nil, errDeserialize(fmt.Sprintf("unable to decode "+
			"UTXO: %s", err))
	}

	entry := &UTXOEntry{
		amount:         amount,
		scriptPubKey:   scriptPubKey,
		blockBlueScore: blockBlueScore,
		packedFlags:    txoFlags(packedFlags),
		packedFlags:    0,
	}

	entry.amount, err = binaryserializer.Uint64(r, byteOrder)
	if err != nil {
		return nil, err
	}

	scriptPubKeyLen, err := domainmessage.ReadVarInt(r)
	if err != nil {
		return nil, err
	}

	entry.scriptPubKey = make([]byte, scriptPubKeyLen)
	_, err = r.Read(entry.scriptPubKey)
	if err != nil {
		return nil, errors.WithStack(err)
	if isCoinbase {
		entry.packedFlags |= tfCoinbase
	}

	return entry, nil
@@ -2,14 +2,13 @@ package blockdag

import (
	"fmt"
	"github.com/pkg/errors"
	"math"
	"sort"
	"strings"

	"github.com/pkg/errors"

	"github.com/kaspanet/go-secp256k1"
	"github.com/kaspanet/kaspad/domainmessage"
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/wire"
)

const (
@@ -78,7 +77,7 @@ const (
)

// NewUTXOEntry creates a new utxoEntry representing the given txOut
func NewUTXOEntry(txOut *domainmessage.TxOut, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
func NewUTXOEntry(txOut *wire.TxOut, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
	entry := &UTXOEntry{
		amount:       txOut.Value,
		scriptPubKey: txOut.ScriptPubKey,
@@ -93,7 +92,7 @@ func NewUTXOEntry(txOut *domainmessage.TxOut, isCoinbase bool, blockBlueScore ui
}

// utxoCollection represents a set of UTXOs indexed by their outpoints
type utxoCollection map[domainmessage.Outpoint]*UTXOEntry
type utxoCollection map[wire.Outpoint]*UTXOEntry

func (uc utxoCollection) String() string {
	utxoStrings := make([]string, len(uc))
@@ -112,31 +111,31 @@ func (uc utxoCollection) String() string {
}

// add adds a new UTXO entry to this collection
func (uc utxoCollection) add(outpoint domainmessage.Outpoint, entry *UTXOEntry) {
func (uc utxoCollection) add(outpoint wire.Outpoint, entry *UTXOEntry) {
	uc[outpoint] = entry
}

// remove removes a UTXO entry from this collection if it exists
func (uc utxoCollection) remove(outpoint domainmessage.Outpoint) {
func (uc utxoCollection) remove(outpoint wire.Outpoint) {
	delete(uc, outpoint)
}

// get returns the UTXOEntry represented by provided outpoint,
// and a boolean value indicating if said UTXOEntry is in the set or not
func (uc utxoCollection) get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
func (uc utxoCollection) get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
	entry, ok := uc[outpoint]
	return entry, ok
}

// contains returns a boolean value indicating whether a UTXO entry is in the set
func (uc utxoCollection) contains(outpoint domainmessage.Outpoint) bool {
func (uc utxoCollection) contains(outpoint wire.Outpoint) bool {
	_, ok := uc[outpoint]
	return ok
}

// containsWithBlueScore returns a boolean value indicating whether a UTXOEntry
// is in the set and its blue score is equal to the given blue score.
func (uc utxoCollection) containsWithBlueScore(outpoint domainmessage.Outpoint, blueScore uint64) bool {
func (uc utxoCollection) containsWithBlueScore(outpoint wire.Outpoint, blueScore uint64) bool {
	entry, ok := uc.get(outpoint)
	return ok && entry.blockBlueScore == blueScore
}
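The collection methods above lean on Go map semantics: `wire.Outpoint` is a comparable struct, so it can key a map directly and membership tests are O(1). A toy sketch of the same shape:

```go
package main

import "fmt"

// outpoint is a toy comparable key, like wire.Outpoint above.
type outpoint struct {
	txID  [32]byte
	index uint32
}

type entry struct{ blueScore uint64 }

type collection map[outpoint]*entry

// containsWithBlueScore mirrors the method above: present AND same score.
func (c collection) containsWithBlueScore(o outpoint, blueScore uint64) bool {
	e, ok := c[o]
	return ok && e.blueScore == blueScore
}

func main() {
	c := collection{}
	o := outpoint{txID: [32]byte{1}, index: 0}
	c[o] = &entry{blueScore: 5}
	fmt.Println(c.containsWithBlueScore(o, 5), c.containsWithBlueScore(o, 6)) // true false
}
```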
@@ -153,16 +152,29 @@ func (uc utxoCollection) clone() utxoCollection {

// UTXODiff represents a diff between two UTXO Sets.
type UTXODiff struct {
	toAdd    utxoCollection
	toRemove utxoCollection
	toAdd        utxoCollection
	toRemove     utxoCollection
	diffMultiset *ecc.Multiset
	useMultiset  bool
}

// NewUTXODiff creates a new, empty utxoDiff
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
// without a multiset.
func NewUTXODiffWithoutMultiset() *UTXODiff {
	return &UTXODiff{
		toAdd:       utxoCollection{},
		toRemove:    utxoCollection{},
		useMultiset: false,
	}
}

// NewUTXODiff creates a new, empty utxoDiff.
func NewUTXODiff() *UTXODiff {
	return &UTXODiff{
		toAdd:    utxoCollection{},
		toRemove: utxoCollection{},
		toAdd:        utxoCollection{},
		toRemove:     utxoCollection{},
		useMultiset:  true,
		diffMultiset: ecc.NewMultiset(ecc.S256()),
	}
}

@@ -196,8 +208,9 @@ func NewUTXODiff() *UTXODiff {
// diffFrom results in the UTXO being added to toAdd
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
	result := UTXODiff{
		toAdd:    make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
		toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
		toAdd:       make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
		toRemove:    make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
		useMultiset: d.useMultiset,
	}

	// Note that the following cases are not accounted for, as they are impossible
@@ -211,10 +224,6 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
	for outpoint, utxoEntry := range d.toAdd {
		if !other.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
			result.toRemove.add(outpoint, utxoEntry)
		} else if (d.toRemove.contains(outpoint) && !other.toRemove.contains(outpoint)) ||
			(!d.toRemove.contains(outpoint) && other.toRemove.contains(outpoint)) {
			return nil, errors.New(
				"diffFrom: outpoint both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove")
		}
		if diffEntry, ok := other.toRemove.get(outpoint); ok {
			// An exception is made for entries with unequal blue scores
@@ -234,18 +243,6 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
	// If they are not in other.toRemove - should be added in result.toAdd
	// If they are in other.toAdd - base utxoSet is not the same
	for outpoint, utxoEntry := range d.toRemove {
		diffEntry, ok := other.toRemove.get(outpoint)
		if ok {
			// if have the same entry in d.toRemove - simply don't copy.
			// unless existing entry is with different blue score, in this case - this is an error
			if utxoEntry.blockBlueScore != diffEntry.blockBlueScore {
				return nil, errors.New("diffFrom: outpoint both in d.toRemove and other.toRemove with different " +
					"blue scores, with no corresponding entry in d.toAdd")
			}
		} else { // if no existing entry - add to result.toAdd
			result.toAdd.add(outpoint, utxoEntry)
		}

		if !other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
			result.toAdd.add(outpoint, utxoEntry)
		}
@@ -259,7 +256,7 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
				other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
				continue
			}
			return nil, errors.New("diffFrom: outpoint both in d.toRemove and in other.toAdd")
			return nil, errors.New("diffFrom: transaction both in d.toRemove and in other.toAdd")
		}
	}

@@ -279,72 +276,124 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
		}
	}

	if d.useMultiset {
		// Create a new diffMultiset as the subtraction of the two diffs.
		result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
	}

	return &result, nil
}

// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
// first d, and than diff were applied to the same base
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
	for outpoint, entryToRemove := range diff.toRemove {
		if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
			// If already exists in toAdd with the same blueScore - remove from toAdd
			d.toAdd.remove(outpoint)
			continue
		}
		if d.toRemove.contains(outpoint) {
			// If already exists - this is an error
			return errors.Errorf(
				"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)
		}

		// If not exists neither in toAdd nor in toRemove - add to toRemove
		d.toRemove.add(outpoint, entryToRemove)
	}

	for outpoint, entryToAdd := range diff.toAdd {
		if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
			// If already exists in toRemove with the same blueScore - remove from toRemove
			if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
				return errors.Errorf(
					"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
						"corresponding entry in diff.toRemove", outpoint)
			}
			d.toRemove.remove(outpoint)
			continue
		}
		if existingEntry, ok := d.toAdd.get(outpoint); ok &&
			(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
				!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
			// If already exists - this is an error
			return errors.Errorf(
				"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)
		}

		// If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd
		d.toAdd.add(outpoint, entryToAdd)
	}

	return nil
}
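The cancellation rule in `withDiffInPlace` is the crux: an outpoint added by `d` and then removed by `diff` at the same blue score vanishes from the combined diff rather than appearing in both sets. A toy, self-contained illustration of just that rule (string keys stand in for outpoints):

```go
package main

import "fmt"

func main() {
	toAdd := map[string]uint64{"tx1:0": 5} // d.toAdd: outpoint -> blue score
	toRemove := map[string]uint64{}        // d.toRemove

	// diff.toRemove removes the same outpoint at the same blue score.
	diffToRemove := map[string]uint64{"tx1:0": 5}
	for op, score := range diffToRemove {
		if s, ok := toAdd[op]; ok && s == score {
			delete(toAdd, op) // add followed by remove cancels out
			continue
		}
		toRemove[op] = score // otherwise record the removal
	}
	fmt.Println(len(toAdd), len(toRemove)) // 0 0: the combined diff is empty
}
```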
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and than diff were applied to some base
// first d, and than diff were applied to the same base
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
//          |           | this      |           |
// ---------+-----------+-----------+-----------+-----------
//          |           | toAdd     | toRemove  | None
// ---------+-----------+-----------+-----------+-----------
// other    | toAdd     | X         | -         | toAdd
// ---------+-----------+-----------+-----------+-----------
//          | toRemove  | -         | X         | toRemove
// ---------+-----------+-----------+-----------+-----------
//          | None      | toAdd     | toRemove  | -
//
// Key:
// -        Don't add anything to the result
// X        Return an error
// toAdd    Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
//    WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
//    WithDiff results in the UTXO being added to toRemove
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
	clone := d.clone()

	err := clone.withDiffInPlace(diff)
	if err != nil {
		return nil, err
	result := UTXODiff{
		toAdd:       make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
		toRemove:    make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
		useMultiset: d.useMultiset,
	}

	return clone, nil
	// All transactions in d.toAdd:
	// If they are not in diff.toRemove - should be added in result.toAdd
	// If they are in diff.toAdd - should throw an error
	// Otherwise - should be ignored
	for outpoint, utxoEntry := range d.toAdd {
		if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
			result.toAdd.add(outpoint, utxoEntry)
		}
		if diffEntry, ok := diff.toAdd.get(outpoint); ok {
			// An exception is made for entries with unequal blue scores
			// as long as the appropriate entry exists in either d.toRemove
			// or diff.toRemove.
			// These are just "updates" to accepted blue score
			if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
				(d.toRemove.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
					diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
				continue
			}
			return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
		}
	}

	// All transactions in d.toRemove:
	// If they are not in diff.toAdd - should be added in result.toRemove
	// If they are in diff.toRemove - should throw an error
	// Otherwise - should be ignored
	for outpoint, utxoEntry := range d.toRemove {
		if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
			result.toRemove.add(outpoint, utxoEntry)
		}
		if diffEntry, ok := diff.toRemove.get(outpoint); ok {
			// An exception is made for entries with unequal blue scores
			// as long as the appropriate entry exists in either d.toAdd
			// or diff.toAdd.
			// These are just "updates" to accepted blue score
			if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
				(d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
					diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
				continue
			}
			return nil, ruleError(ErrWithDiff, "WithDiff: transaction both in d.toRemove and in other.toRemove")
		}
	}

	// All transactions in diff.toAdd:
	// If they are not in d.toRemove - should be added in result.toAdd
	for outpoint, utxoEntry := range diff.toAdd {
		if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
			result.toAdd.add(outpoint, utxoEntry)
		}
	}

	// All transactions in diff.toRemove:
	// If they are not in d.toAdd - should be added in result.toRemove
|
||||
for outpoint, utxoEntry := range diff.toRemove {
|
||||
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
|
||||
result.toRemove.add(outpoint, utxoEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply diff.diffMultiset to d.diffMultiset
|
||||
if d.useMultiset {
|
||||
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
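In the newer variant above, WithDiff reduces to clone-then-mutate: it is the non-mutating wrapper around withDiffInPlace, so callers keep the receiver intact. Reusing the toy diff type from the sketch after withDiffInPlace (still hypothetical, simplified):

```go
// Non-mutating wrapper: clone the receiver, then apply the diff in place.
func (d *diff) withDiff(other *diff) (*diff, error) {
	clone := &diff{
		toAdd:    make(map[string]uint64, len(d.toAdd)),
		toRemove: make(map[string]uint64, len(d.toRemove)),
	}
	for k, v := range d.toAdd {
		clone.toAdd[k] = v
	}
	for k, v := range d.toRemove {
		clone.toRemove[k] = v
	}
	if err := clone.withDiffInPlace(other); err != nil {
		return nil, err
	}
	return clone, nil
}
```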

// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
clone := &UTXODiff{
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
useMultiset: d.useMultiset,
}
if d.useMultiset {
clone.diffMultiset = d.diffMultiset.Clone()
}
return clone
}
@@ -353,7 +402,7 @@ func (d *UTXODiff) clone() *UTXODiff {
//
// If d.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (d *UTXODiff) AddEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) error {
func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
if d.toRemove.containsWithBlueScore(outpoint, entry.blockBlueScore) {
d.toRemove.remove(outpoint)
} else if _, exists := d.toAdd[outpoint]; exists {
@@ -361,6 +410,14 @@ func (d *UTXODiff) AddEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) e
} else {
d.toAdd.add(outpoint, entry)
}

if d.useMultiset {
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}

@@ -368,7 +425,7 @@ func (d *UTXODiff) AddEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) e
//
// If d.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (d *UTXODiff) RemoveEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry) error {
func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
if d.toAdd.containsWithBlueScore(outpoint, entry.blockBlueScore) {
d.toAdd.remove(outpoint)
} else if _, exists := d.toRemove[outpoint]; exists {
@@ -376,10 +433,21 @@ func (d *UTXODiff) RemoveEntry(outpoint domainmessage.Outpoint, entry *UTXOEntry
} else {
d.toRemove.add(outpoint, entry)
}

if d.useMultiset {
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
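AddEntry and RemoveEntry above are mirror images: each first tries to cancel a pending opposite entry with the same blue score, and only otherwise records a new one. A sketch on the same toy diff type from the earlier example (multiset maintenance left out):

```go
// addEntry/removeEntry cancellation, toy version of the methods above.
func (d *diff) addEntry(op string, blueScore uint64) {
	if s, ok := d.toRemove[op]; ok && s == blueScore {
		delete(d.toRemove, op) // cancels a pending remove
		return
	}
	d.toAdd[op] = blueScore
}

func (d *diff) removeEntry(op string, blueScore uint64) {
	if s, ok := d.toAdd[op]; ok && s == blueScore {
		delete(d.toAdd, op) // cancels a pending add
		return
	}
	d.toRemove[op] = blueScore
}
```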

func (d UTXODiff) String() string {
if d.useMultiset {
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
}
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}

@@ -399,27 +467,97 @@ type UTXOSet interface {
fmt.Stringer
diffFrom(other UTXOSet) (*UTXODiff, error)
WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
AddTx(tx *domainmessage.MsgTx, blockBlueScore uint64) (ok bool, err error)
diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool)
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
Multiset() *ecc.Multiset
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
}

// diffFromTx is a common implementation for diffFromTx, that works
// for both diff-based and full UTXO sets
// Returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
if entry, ok := u.Get(txIn.PreviousOutpoint); ok {
err := diff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return nil, err
}
} else {
return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
"Transaction %s is invalid because spends outpoint %s that is not in utxo set",
tx.TxID(), txIn.PreviousOutpoint))
}
}
}
for i, txOut := range tx.TxOut {
entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
err := diff.AddEntry(outpoint, entry)
if err != nil {
return nil, err
}
}
return diff, nil
}
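The shape of the result is: one toRemove entry per spent input, one toAdd entry per created output. A toy equivalent on the simplified diff type from the earlier sketches (the error stands in for ErrMissingTxOut; all names are illustrative):

```go
// Toy diffFromTx: spent inputs must exist in the utxo view; outputs are
// created at the accepting blue score.
func diffFromTxToy(utxos map[string]uint64, inputs, outputs []string, acceptingBlueScore uint64) (*diff, error) {
	d := &diff{toAdd: map[string]uint64{}, toRemove: map[string]uint64{}}
	for _, in := range inputs {
		score, ok := utxos[in]
		if !ok {
			return nil, fmt.Errorf("input %s is not in the utxo set", in)
		}
		d.removeEntry(in, score)
	}
	for _, out := range outputs {
		d.addEntry(out, acceptingBlueScore)
	}
	return d, nil
}
```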

// diffFromAcceptedTx is a common implementation for diffFromAcceptedTx, that works
// for both diff-based and full UTXO sets.
// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore.
// Returns an error if the provided transaction's entry is not valid in the context
// of this UTXOSet.
func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
// Fetch any unaccepted transaction
existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
existingEntry, ok := u.Get(existingOutpoint)
if !ok {
return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint)
}

// Remove unaccepted entries
err := diff.RemoveEntry(existingOutpoint, existingEntry)
if err != nil {
return nil, err
}

// Add new entries with their accepting blue score
newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
err = diff.AddEntry(existingOutpoint, newEntry)
if err != nil {
return nil, err
}
}
return diff, nil
}
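diffFromAcceptedTx never spends anything new: for each output it removes the entry at its original blue score and re-adds the same outpoint at the accepting blue score, which the blue-score exceptions in withDiffInPlace and Get then treat as an update rather than a conflict. On the toy type again:

```go
// Toy accept-time rewrite: same outpoint, old score out, accepting score in.
func acceptOutpointToy(d *diff, op string, originalScore, acceptingScore uint64) {
	d.removeEntry(op, originalScore) // drop the unaccepted entry
	d.addEntry(op, acceptingScore)   // re-add with the accepting blue score
}
```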

// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}

// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}

// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := secp256k1.NewMultiset()
multiset := ecc.NewMultiset(ecc.S256())
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -428,6 +566,7 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}

@@ -456,28 +595,44 @@ func (fus *FullUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
// necessarily means there's an error).
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) AddTx(tx *domainmessage.MsgTx, blueScore uint64) (isAccepted bool, err error) {
if !fus.containsInputs(tx) {
return false, nil
}

for _, txIn := range tx.TxIn {
fus.remove(txIn.PreviousOutpoint)
}

func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool, err error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
if !fus.containsInputs(tx) {
return false, nil
}

for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
}
}

for i, txOut := range tx.TxOut {
outpoint := *domainmessage.NewOutpoint(tx.TxID(), uint32(i))
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
fus.add(outpoint, entry)

err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
}

return true, nil
}

func (fus *FullUTXOSet) containsInputs(tx *domainmessage.MsgTx) bool {
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(fus, tx, acceptingBlueScore)
}

func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *domainmessage.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
if !fus.contains(outpoint) {
return false
}
@@ -486,17 +641,70 @@ func (fus *FullUTXOSet) containsInputs(tx *domainmessage.MsgTx) bool {
return true
}

func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(fus, tx, acceptingBlueScore)
}

// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
}

// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
func (fus *FullUTXOSet) Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
utxoEntry, ok := fus.utxoCollection[outpoint]
return utxoEntry, ok
}

// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}

// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}

// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}

// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}

// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -540,12 +748,13 @@ func (dus *DiffUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) AddTx(tx *domainmessage.MsgTx, blockBlueScore uint64) (bool, error) {
if !dus.containsInputs(tx) {
func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase && !dus.containsInputs(tx) {
return false, nil
}

err := dus.appendTx(tx, blockBlueScore)
err := dus.appendTx(tx, blockBlueScore, isCoinbase)
if err != nil {
return false, err
}
@@ -553,21 +762,23 @@ func (dus *DiffUTXOSet) AddTx(tx *domainmessage.MsgTx, blockBlueScore uint64) (b
return true, nil
}

func (dus *DiffUTXOSet) appendTx(tx *domainmessage.MsgTx, blockBlueScore uint64) error {
for _, txIn := range tx.TxIn {
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
if err != nil {
return err
}
}
}

isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *domainmessage.NewOutpoint(tx.TxID(), uint32(i))
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)

err := dus.UTXODiff.AddEntry(outpoint, entry)
@@ -578,9 +789,9 @@ func (dus *DiffUTXOSet) appendTx(tx *domainmessage.MsgTx, blockBlueScore uint64)
return nil
}

func (dus *DiffUTXOSet) containsInputs(tx *domainmessage.MsgTx) bool {
func (dus *DiffUTXOSet) containsInputs(tx *wire.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *domainmessage.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
isInBase := dus.base.contains(outpoint)
isInDiffToAdd := dus.UTXODiff.toAdd.contains(outpoint)
isInDiffToRemove := dus.UTXODiff.toRemove.contains(outpoint)
@@ -605,12 +816,31 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}
dus.UTXODiff = NewUTXODiff()

if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}

if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
return nil
}

// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(dus, tx, acceptingBlueScore)
}

func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(dus, tx, acceptingBlueScore)
}

func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
}

// clone returns a clone of this UTXO Set
@@ -618,15 +848,9 @@ func (dus *DiffUTXOSet) clone() UTXOSet {
return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}

// cloneWithoutBase returns a *DiffUTXOSet with same
// base as this *DiffUTXOSet and a cloned diff.
func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet {
return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
}

// Get returns the UTXOEntry associated with provided outpoint in this UTXOSet.
// Returns false in second output if this UTXOEntry was not found
func (dus *DiffUTXOSet) Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool) {
func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
if toRemoveEntry, ok := dus.UTXODiff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// These are just "updates" to accepted blue score
@@ -641,3 +865,42 @@ func (dus *DiffUTXOSet) Get(outpoint domainmessage.Outpoint) (*UTXOEntry, bool)
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}

// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}

// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}

func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}

func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}
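These two helpers work because ecc.Multiset is an elliptic-curve multiset hash (ECMH): Union and Subtract commute, so UTXOs can be folded in and out in any order and the running hash commits to exactly the surviving set. The toy stand-in below uses XOR of SHA-256 digests purely to show the algebra; XOR is not collision-resistant enough to serve as a real multiset hash:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

type toyMultiset [32]byte

// toggle XORs in an item's digest; since XOR is its own inverse, the same
// call both adds a missing item and removes a present one.
func (m toyMultiset) toggle(item []byte) toyMultiset {
	h := sha256.Sum256(item)
	for i := range m {
		m[i] ^= h[i]
	}
	return m
}

func main() {
	var m toyMultiset
	m = m.toggle([]byte("utxo-a")) // add a
	m = m.toggle([]byte("utxo-b")) // add b
	m = m.toggle([]byte("utxo-a")) // remove a
	onlyB := toyMultiset{}.toggle([]byte("utxo-b"))
	fmt.Println(m == onlyB) // true: order and intermediate states don't matter
}
```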

File diff suppressed because it is too large
@@ -10,16 +10,14 @@ import (
"sort"
"time"

"github.com/kaspanet/kaspad/util/mstime"

"github.com/pkg/errors"

"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)

const (
@@ -47,7 +45,7 @@ const (

// isNullOutpoint determines whether or not a previous transaction outpoint
// is set.
func isNullOutpoint(outpoint *domainmessage.Outpoint) bool {
func isNullOutpoint(outpoint *wire.Outpoint) bool {
if outpoint.Index == math.MaxUint32 && outpoint.TxID == daghash.ZeroTxID {
return true
}
@@ -58,12 +56,12 @@ func isNullOutpoint(outpoint *domainmessage.Outpoint) bool {
// met, meaning that all the inputs of a given transaction have reached a
// blue score or time sufficient for their relative lock-time maturity.
func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
medianTimePast mstime.Time) bool {
medianTimePast time.Time) bool {

// If either the milliseconds, or blue score relative-lock time has not yet
// If either the seconds, or blue score relative-lock time has not yet
// reached, then the transaction is not yet mature according to its
// sequence locks.
if sequenceLock.Milliseconds >= medianTimePast.UnixMilliseconds() ||
if sequenceLock.Seconds >= medianTimePast.Unix() ||
sequenceLock.BlockBlueScore >= int64(blockBlueScore) {
return false
}
@@ -72,7 +70,7 @@ func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
}
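Both variants in the hunk above (milliseconds vs. seconds) reduce to the same decision rule: a sequence lock is active only when every enabled component is strictly below the current value, and a component set to -1 disables itself. A self-contained restatement:

```go
// sequenceLockActive mirrors the comparison above: either component still at
// or above its reference value means the lock has not yet matured.
func sequenceLockActive(lockTime, lockBlueScore, medianTimePast, blueScore int64) bool {
	if lockTime >= medianTimePast || lockBlueScore >= blueScore {
		return false
	}
	return true
}
```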

// IsFinalizedTransaction determines whether or not a transaction is finalized.
func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime mstime.Time) bool {
func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime time.Time) bool {
msgTx := tx.MsgTx()

// Lock time of zero means the transaction is finalized.
@@ -89,7 +87,7 @@ func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime mstime
if lockTime < txscript.LockTimeThreshold {
blockTimeOrBlueScore = int64(blockBlueScore)
} else {
blockTimeOrBlueScore = blockTime.UnixMilliseconds()
blockTimeOrBlueScore = blockTime.Unix()
}
if int64(lockTime) < blockTimeOrBlueScore {
return true
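The branch above picks the reference value a transaction's lock time is compared against. A hedged sketch, assuming the btcd-style convention that lock-time values below txscript.LockTimeThreshold denote blue scores while values at or above it denote clock time (the constant's actual value is not shown in this hunk):

```go
// lockTimeReference chooses what a lock time is measured against; the
// threshold parameter here is an assumption, not kaspad's actual constant.
func lockTimeReference(lockTime, threshold, blueScore uint64, blockTimeUnix int64) int64 {
	if lockTime < threshold {
		return int64(blueScore) // lock expressed as a blue score
	}
	return blockTimeUnix // lock expressed as wall-clock time
}
```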
@@ -135,6 +133,15 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
return ruleError(ErrNoTxInputs, "transaction has no inputs")
}

// A transaction must not exceed the maximum allowed block mass when
// serialized.
serializedTxSize := msgTx.SerializeSize()
if serializedTxSize*MassPerTxByte > wire.MaxMassPerTx {
str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, wire.MaxMassPerBlock)
return ruleError(ErrTxMassTooHigh, str)
}

// Ensure the transaction amounts are in range. Each transaction
// output must not be negative or more than the max allowed per
// transaction. Also, the total of all outputs must abide by the same
@@ -172,7 +179,7 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
}

// Check for duplicate transaction inputs.
existingTxOut := make(map[domainmessage.Outpoint]struct{})
existingTxOut := make(map[wire.Outpoint]struct{})
for _, txIn := range msgTx.TxIn {
if _, exists := existingTxOut[txIn.PreviousOutpoint]; exists {
return ruleError(ErrDuplicateTxInputs, "transaction "+
@@ -213,7 +220,10 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
}

// Transactions in native, registry and coinbase subnetworks must have Gas = 0
if msgTx.SubnetworkID.IsBuiltInOrNative() && msgTx.Gas > 0 {
if (msgTx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
msgTx.SubnetworkID.IsBuiltIn()) &&
msgTx.Gas > 0 {

return ruleError(ErrInvalidGas, "transaction in the native or "+
"registry subnetworks has gas > 0 ")
}
@@ -253,7 +263,7 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
// The flags modify the behavior of this function as follows:
// - BFNoPoWCheck: The check to ensure the block hash is less than the target
// difficulty is not performed.
func (dag *BlockDAG) checkProofOfWork(header *domainmessage.BlockHeader, flags BehaviorFlags) error {
func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFlags) error {
// The target difficulty must be larger than zero.
target := util.CompactToBig(header.Bits)
if target.Sign() <= 0 {
@@ -263,9 +273,9 @@ func (dag *BlockDAG) checkProofOfWork(header *domainmessage.BlockHeader, flags B
}

// The target difficulty must be less than the maximum allowed.
if target.Cmp(dag.Params.PowMax) > 0 {
if target.Cmp(dag.dagParams.PowMax) > 0 {
str := fmt.Sprintf("block target difficulty of %064x is "+
"higher than max of %064x", target, dag.Params.PowMax)
"higher than max of %064x", target, dag.dagParams.PowMax)
return ruleError(ErrUnexpectedDifficulty, str)
}

@@ -293,9 +303,9 @@ func ValidateTxMass(tx *util.Tx, utxoSet UTXOSet) error {
if err != nil {
return err
}
if txMass > domainmessage.MaxMassPerBlock {
if txMass > wire.MaxMassPerBlock {
str := fmt.Sprintf("tx %s has mass %d, which is above the "+
"allowed limit of %d", tx.ID(), txMass, domainmessage.MaxMassPerBlock)
"allowed limit of %d", tx.ID(), txMass, wire.MaxMassPerBlock)
return ruleError(ErrTxMassTooHigh, str)
}
return nil
@@ -319,9 +329,9 @@ func CalcBlockMass(pastUTXO UTXOSet, transactions []*util.Tx) (uint64, error) {

// We could potentially overflow the accumulator so check for
// overflow as well.
if totalMass < txMass || totalMass > domainmessage.MaxMassPerBlock {
if totalMass < txMass || totalMass > wire.MaxMassPerBlock {
str := fmt.Sprintf("block has total mass %d, which is "+
"above the allowed limit of %d", totalMass, domainmessage.MaxMassPerBlock)
"above the allowed limit of %d", totalMass, wire.MaxMassPerBlock)
return 0, ruleError(ErrBlockMassTooHigh, str)
}
}
@@ -390,18 +400,17 @@ func CalcTxMass(tx *util.Tx, previousScriptPubKeys [][]byte) uint64 {
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to checkProofOfWork.
func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFlags) (delay time.Duration, err error) {
func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags BehaviorFlags) (delay time.Duration, err error) {
// Ensure the proof of work bits in the block header is in min/max range
// and the block hash is less than the target value described by the
// bits.
header := &block.MsgBlock().Header
err = dag.checkProofOfWork(header, flags)
if err != nil {
return 0, err
}

if len(header.ParentHashes) == 0 {
if !header.BlockHash().IsEqual(dag.Params.GenesisHash) {
if !header.BlockHash().IsEqual(dag.dagParams.GenesisHash) {
return 0, ruleError(ErrNoParents, "block has no parents")
}
} else {
@@ -411,11 +420,23 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla
}
}

// A block timestamp must not have a greater precision than one second.
// This check is necessary because Go time.Time values support
// nanosecond precision whereas the consensus rules only apply to
// seconds and it's much nicer to deal with standard Go time values
// instead of converting to seconds everywhere.
if !header.Timestamp.Equal(time.Unix(header.Timestamp.Unix(), 0)) {
str := fmt.Sprintf("block timestamp of %s has a higher "+
"precision than one second", header.Timestamp)
return 0, ruleError(ErrInvalidTime, str)
}

// Ensure the block time is not too far in the future. If it's too far, return
// the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid).
maxTimestamp := dag.Now().Add(time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock)
maxTimestamp := dag.AdjustedTime().Add(time.Second *
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil
}
@@ -424,10 +445,10 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla
}

// checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *domainmessage.BlockHeader) error {
sortedHashes := make([]*daghash.Hash, header.NumParentBlocks())
for i, hash := range header.ParentHashes {
sortedHashes[i] = hash
func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]*daghash.Hash, 0, header.NumParentBlocks())
for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash)
}
sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(sortedHashes[i], sortedHashes[j])
@@ -444,182 +465,87 @@ func checkBlockParentsOrder(header *domainmessage.BlockHeader) error {
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to checkBlockHeaderSanity.
func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (time.Duration, error) {
delay, err := dag.checkBlockHeaderSanity(block, flags)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsAtLeastOneTransaction(block)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsLessThanMaxBlockMassTransactions(block)
if err != nil {
return 0, err
}
err = dag.checkFirstBlockTransactionIsCoinbase(block)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsOnlyOneCoinbase(block)
if err != nil {
return 0, err
}
err = dag.checkBlockTransactionOrder(block)
if err != nil {
return 0, err
}
err = dag.checkNoNonNativeTransactions(block)
if err != nil {
return 0, err
}
err = dag.checkBlockTransactionSanity(block)
if err != nil {
return 0, err
}
err = dag.checkBlockHashMerkleRoot(block)
msgBlock := block.MsgBlock()
header := &msgBlock.Header
delay, err := dag.checkBlockHeaderSanity(header, flags)
if err != nil {
return 0, err
}

// The following check will be fairly quick since the transaction IDs
// are already cached due to building the merkle tree above.
err = dag.checkBlockDuplicateTransactions(block)
if err != nil {
return 0, err
}

err = dag.checkBlockDoubleSpends(block)
if err != nil {
return 0, err
}
return delay, nil
}

func (dag *BlockDAG) checkBlockContainsAtLeastOneTransaction(block *util.Block) error {
transactions := block.Transactions()
numTx := len(transactions)
// A block must have at least one transaction.
numTx := len(msgBlock.Transactions)
if numTx == 0 {
return ruleError(ErrNoTransactions, "block does not contain "+
return 0, ruleError(ErrNoTransactions, "block does not contain "+
"any transactions")
}
return nil
}

func (dag *BlockDAG) checkBlockContainsLessThanMaxBlockMassTransactions(block *util.Block) error {
// A block must not have more transactions than the max block mass or
// else it is certainly over the block mass limit.
transactions := block.Transactions()
numTx := len(transactions)
if numTx > domainmessage.MaxMassPerBlock {
if numTx > wire.MaxMassPerBlock {
str := fmt.Sprintf("block contains too many transactions - "+
"got %d, max %d", numTx, domainmessage.MaxMassPerBlock)
return ruleError(ErrBlockMassTooHigh, str)
"got %d, max %d", numTx, wire.MaxMassPerBlock)
return 0, ruleError(ErrBlockMassTooHigh, str)
}
return nil
}

func (dag *BlockDAG) checkFirstBlockTransactionIsCoinbase(block *util.Block) error {
// The first transaction in a block must be a coinbase.
transactions := block.Transactions()
if !transactions[util.CoinbaseTransactionIndex].IsCoinBase() {
return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
return 0, ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
"block is not a coinbase")
}
return nil
}

func (dag *BlockDAG) checkBlockContainsOnlyOneCoinbase(block *util.Block) error {
transactions := block.Transactions()
for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
txOffset := util.CoinbaseTransactionIndex + 1

// A block must not have more than one coinbase. And transactions must be
// ordered by subnetwork
for i, tx := range transactions[txOffset:] {
if tx.IsCoinBase() {
str := fmt.Sprintf("block contains second coinbase at "+
"index %d", i+2)
return ruleError(ErrMultipleCoinbases, str)
return 0, ruleError(ErrMultipleCoinbases, str)
}
}
return nil
}

func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error {
transactions := block.Transactions()
for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) {
return ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
return 0, ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
}
}
return nil
}

func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error {
// Disallow non-native/coinbase subnetworks in networks that don't allow them
if !dag.Params.EnableNonNativeSubnetworks {
transactions := block.Transactions()
for _, tx := range transactions {
if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
return ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed")
}
}
}
return nil
}

func (dag *BlockDAG) checkBlockTransactionSanity(block *util.Block) error {
transactions := block.Transactions()
// Do some preliminary checks on each transaction to ensure they are
// sane before continuing.
for _, tx := range transactions {
err := CheckTransactionSanity(tx, dag.subnetworkID)
if err != nil {
return err
return 0, err
}
}
return nil
}

func (dag *BlockDAG) checkBlockHashMerkleRoot(block *util.Block) error {
// Build merkle tree and ensure the calculated merkle root matches the
// entry in the block header. This also has the effect of caching all
// of the transaction hashes in the block to speed up future hash
// checks.
hashMerkleTree := BuildHashMerkleTreeStore(block.Transactions())
calculatedHashMerkleRoot := hashMerkleTree.Root()
if !block.MsgBlock().Header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
if !header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
str := fmt.Sprintf("block hash merkle root is invalid - block "+
"header indicates %s, but calculated value is %s",
block.MsgBlock().Header.HashMerkleRoot, calculatedHashMerkleRoot)
return ruleError(ErrBadMerkleRoot, str)
header.HashMerkleRoot, calculatedHashMerkleRoot)
return 0, ruleError(ErrBadMerkleRoot, str)
}
return nil
}

func (dag *BlockDAG) checkBlockDuplicateTransactions(block *util.Block) error {
// Check for duplicate transactions. This check will be fairly quick
// since the transaction IDs are already cached due to building the
// merkle tree above.
existingTxIDs := make(map[daghash.TxID]struct{})
transactions := block.Transactions()
for _, tx := range transactions {
id := tx.ID()
if _, exists := existingTxIDs[*id]; exists {
str := fmt.Sprintf("block contains duplicate "+
"transaction %s", id)
return ruleError(ErrDuplicateTx, str)
return 0, ruleError(ErrDuplicateTx, str)
}
existingTxIDs[*id] = struct{}{}
}
return nil
}

func (dag *BlockDAG) checkBlockDoubleSpends(block *util.Block) error {
usedOutpoints := make(map[domainmessage.Outpoint]*daghash.TxID)
transactions := block.Transactions()
for _, tx := range transactions {
for _, txIn := range tx.MsgTx().TxIn {
if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists {
str := fmt.Sprintf("transaction %s spends "+
"outpoint %s that was already spent by "+
"transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID)
return ruleError(ErrDoubleSpendInSameBlock, str)
}
usedOutpoints[txIn.PreviousOutpoint] = tx.ID()
}
}
return nil
return delay, nil
}
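checkBlockDoubleSpends is a single pass with first-writer-wins semantics: the first transaction to reference an outpoint claims it, and any later reference inside the same block is a rule violation. A minimal standalone version:

```go
// findIntraBlockDoubleSpend returns the first outpoint referenced twice by
// the given per-transaction input lists, if any.
func findIntraBlockDoubleSpend(spends [][]string) (string, bool) {
	used := make(map[string]struct{})
	for _, txInputs := range spends {
		for _, outpoint := range txInputs {
			if _, seen := used[outpoint]; seen {
				return outpoint, true
			}
			used[outpoint] = struct{}{}
		}
	}
	return "", false
}
```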

// checkBlockHeaderContext performs several validation checks on the block header
@@ -629,7 +555,7 @@ func (dag *BlockDAG) checkBlockDoubleSpends(block *util.Block) error {
// - BFFastAdd: No checks are performed.
//
// This function MUST be called with the dag state lock held (for writes).
func (dag *BlockDAG) checkBlockHeaderContext(header *domainmessage.BlockHeader, bluestParent *blockNode, fastAdd bool) error {
func (dag *BlockDAG) checkBlockHeaderContext(header *wire.BlockHeader, bluestParent *blockNode, fastAdd bool) error {
if !fastAdd {
if err := dag.validateDifficulty(header, bluestParent); err != nil {
return err
@@ -642,7 +568,7 @@ func (dag *BlockDAG) checkBlockHeaderContext(header *domainmessage.BlockHeader,
return nil
}

func validateMedianTime(dag *BlockDAG, header *domainmessage.BlockHeader, bluestParent *blockNode) error {
func validateMedianTime(dag *BlockDAG, header *wire.BlockHeader, bluestParent *blockNode) error {
if !header.IsGenesis() {
// Ensure the timestamp for the block header is not before the
// median time of the last several blocks (medianTimeBlocks).
@@ -656,7 +582,7 @@ func validateMedianTime(dag *BlockDAG, header *domainmessage.BlockHeader, bluest
return nil
}

func (dag *BlockDAG) validateDifficulty(header *domainmessage.BlockHeader, bluestParent *blockNode) error {
func (dag *BlockDAG) validateDifficulty(header *wire.BlockHeader, bluestParent *blockNode) error {
// Ensure the difficulty specified in the block header matches
// the calculated difficulty based on the previous block and
// difficulty retarget rules.
@@ -672,32 +598,41 @@ func (dag *BlockDAG) validateDifficulty(header *domainmessage.BlockHeader, blues
}

// validateParents validates that no parent is an ancestor of another parent, and no parent is finalized
func (dag *BlockDAG) validateParents(blockHeader *domainmessage.BlockHeader, parents blockSet) error {
for parentA := range parents {
func validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
minBlueScore := uint64(math.MaxUint64)
queue := newDownHeap()
visited := newSet()
for parent := range parents {
// isFinalized might be false-negative because node finality status is
// updated in a separate goroutine. This is why later the block is
// checked more thoroughly on the finality rules in dag.checkFinalityViolation.
if parentA.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
"parent of block %s", parentA.hash, blockHeader.BlockHash()))
// checked more thoroughly on the finality rules in dag.checkFinalityRules.
if parent.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized parent of block %s", parent.hash, blockHeader.BlockHash()))
}

for parentB := range parents {
if parentA == parentB {
continue
if parent.blueScore < minBlueScore {
minBlueScore = parent.blueScore
}
for grandParent := range parent.parents {
if !visited.contains(grandParent) {
queue.Push(grandParent)
visited.add(grandParent)
}

isAncestorOf, err := dag.isInPast(parentA, parentB)
if err != nil {
return err
}
if isAncestorOf {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent %s",
parentA.hash,
blockHeader.BlockHash(),
parentB.hash,
))
}
}
for queue.Len() > 0 {
current := queue.pop()
if parents.contains(current) {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent",
current.hash,
blockHeader.BlockHash()))
}
if current.blueScore > minBlueScore {
for parent := range current.parents {
if !visited.contains(parent) {
queue.Push(parent)
visited.add(parent)
}
}
}
}
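The replacement validateParents swaps the quadratic pairwise isInPast checks for one pruned walk over the parents' past: blue score strictly increases from ancestor to descendant, so once the walk reaches a node whose blue score is not above the minimum parent blue score, nothing further in its past can still be one of the parents. A sketch of that idea on a toy DAG node (the real code drives the walk with a down-heap ordered by blue score; a plain queue is used here for brevity):

```go
type node struct {
	blueScore uint64
	parents   []*node
}

// hasParentAncestorViolation reports whether any parent lies in the past of
// another parent, using the blue-score bound to cut the search off early.
func hasParentAncestorViolation(parents []*node) bool {
	minBlue := ^uint64(0)
	inParents := make(map[*node]bool)
	for _, p := range parents {
		inParents[p] = true
		if p.blueScore < minBlue {
			minBlue = p.blueScore
		}
	}
	visited := make(map[*node]bool)
	var queue []*node
	for _, p := range parents {
		queue = append(queue, p.parents...)
	}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if visited[cur] {
			continue
		}
		visited[cur] = true
		if inParents[cur] {
			return true // some parent is an ancestor of another parent
		}
		if cur.blueScore > minBlue {
			queue = append(queue, cur.parents...)
		}
	}
	return false
}
```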
@@ -719,7 +654,7 @@ func (dag *BlockDAG) checkBlockContext(block *util.Block, parents blockSet, flag
bluestParent := parents.bluest()
fastAdd := flags&BFFastAdd == BFFastAdd

err := dag.validateParents(&block.MsgBlock().Header, parents)
err := validateParents(&block.MsgBlock().Header, parents)
if err != nil {
return err
}
@@ -763,9 +698,9 @@ func (dag *BlockDAG) validateAllTxsFinalized(block *util.Block, node *blockNode,
func ensureNoDuplicateTx(utxoSet UTXOSet, transactions []*util.Tx) error {
// Fetch utxos for all of the transaction outputs in this block.
// Typically, there will not be any utxos for any of the outputs.
fetchSet := make(map[domainmessage.Outpoint]struct{})
fetchSet := make(map[wire.Outpoint]struct{})
for _, tx := range transactions {
prevOut := domainmessage.Outpoint{TxID: *tx.ID()}
prevOut := wire.Outpoint{TxID: *tx.ID()}
for txOutIdx := range tx.MsgTx().TxOut {
prevOut.Index = uint32(txOutIdx)
fetchSet[prevOut] = struct{}{}
@@ -872,7 +807,7 @@ func CheckTransactionInputsAndCalulateFee(tx *util.Tx, txBlueScore uint64, utxoS
return txFeeInSompi, nil
}

func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txBlueScore uint64, txIn *domainmessage.TxIn) error {
func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txBlueScore uint64, txIn *wire.TxIn) error {
// Ensure the transaction is not spending coins which have not
// yet reached the required coinbase maturity.
if entry.IsCoinbase() {
@@ -912,11 +847,6 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
return nil, err
}

err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
if err != nil {
return nil, err
}

if err := validateBlockMass(pastUTXO, transactions); err != nil {
return nil, err
}
@@ -936,7 +866,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,

for _, tx := range transactions {
txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO,
dag.Params, fastAdd)
dag.dagParams, fastAdd)
if err != nil {
return nil, err
}
@@ -992,7 +922,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,

// Now that the inexpensive checks are done and have passed, verify the
// transactions are actually allowed to spend the coins by running the
// expensive SCHNORR signature check scripts. Doing this last helps
// expensive ECDSA signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
if err != nil {

@@ -10,51 +10,47 @@ import (
"testing"
"time"

"github.com/kaspanet/kaspad/util/mstime"

"github.com/pkg/errors"

"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/domainmessage"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)

// TestSequenceLocksActive tests the SequenceLockActive function to ensure it
// works as expected in all possible combinations/scenarios.
func TestSequenceLocksActive(t *testing.T) {
seqLock := func(blueScore int64, milliseconds int64) *SequenceLock {
seqLock := func(h int64, s int64) *SequenceLock {
return &SequenceLock{
Milliseconds: milliseconds,
BlockBlueScore: blueScore,
Seconds: s,
BlockBlueScore: h,
}
}

tests := []struct {
seqLock *SequenceLock
blockBlueScore uint64
mtp mstime.Time
mtp time.Time

want bool
}{
// Block based sequence lock with equal block blue score.
{seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: mstime.UnixMilliseconds(9), want: true},
{seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: time.Unix(9, 0), want: true},

// Time based sequence lock with mtp past the absolute time.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(31), want: true},
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(31, 0), want: true},

// Block based sequence lock with current blue score below seq lock block blue score.
{seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: mstime.UnixMilliseconds(9), want: false},
{seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: time.Unix(9, 0), want: false},

// Time based sequence lock with current time before lock time.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(29), want: false},
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(29, 0), want: false},

// Block based sequence lock at the same blue score, so shouldn't yet be active.
{seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: mstime.UnixMilliseconds(9), want: false},
{seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: time.Unix(9, 0), want: false},

// Time based sequence lock with current time equal to lock time, so shouldn't yet be active.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(30), want: false},
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(30, 0), want: false},
}

t.Logf("Running %d sequence locks tests", len(tests))
@@ -72,7 +68,7 @@ func TestSequenceLocksActive(t *testing.T) {
// ensure it fails.
func TestCheckConnectBlockTemplate(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -127,6 +123,12 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
"block 4: %v", err)
}

blockNode3 := dag.index.LookupNode(blocks[3].Hash())
blockNode4 := dag.index.LookupNode(blocks[4].Hash())
if blockNode3.children.contains(blockNode4) {
t.Errorf("Block 4 wasn't successfully detached as a child from block3")
}

// Block 3a should connect even though it does not build on dag tips.
err = dag.CheckConnectBlockTemplateNoLock(blocks[5])
if err != nil {
@@ -158,7 +160,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
// as expected.
func TestCheckBlockSanity(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -166,7 +168,6 @@ func TestCheckBlockSanity(t *testing.T) {
return
}
defer teardownFunc()
dag.timeSource = newFakeTimeSource(mstime.Now())

block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 {
@@ -185,19 +186,30 @@ func TestCheckBlockSanity(t *testing.T) {
if err == nil {
t.Errorf("CheckBlockSanity: transactions disorder is not detected")
}
var ruleErr RuleError
if !errors.As(err, &ruleErr) {
ruleErr, ok := err.(RuleError)
if !ok {
t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
" %v, err %s", ruleErr.ErrorCode, err)
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
}
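The two error-matching idioms in this hunk are not equivalent once errors are wrapped: a direct type assertion only matches when err itself is a RuleError, while errors.As also unwraps. A small self-contained demonstration with a local error type:

```go
package main

import (
	"errors"
	"fmt"
)

type ruleError struct{ code int }

func (e ruleError) Error() string { return fmt.Sprintf("rule error %d", e.code) }

func main() {
	err := fmt.Errorf("context: %w", ruleError{code: 7})

	if _, ok := err.(ruleError); !ok {
		fmt.Println("type assertion misses the wrapped error")
	}
	var re ruleError
	if errors.As(err, &re) {
		fmt.Println("errors.As finds it, code =", re.code)
	}
}
```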
|
||||
if delay != 0 {
|
||||
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
|
||||
}
|
||||
|
||||
var invalidParentsOrderBlock = domainmessage.MsgBlock{
|
||||
Header: domainmessage.BlockHeader{
|
||||
// Ensure a block that has a timestamp with a precision higher than one
|
||||
// second fails.
|
||||
timestamp := block.MsgBlock().Header.Timestamp
|
||||
block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond)
|
||||
delay, err = dag.checkBlockSanity(block, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
|
||||
}
|
||||
if delay != 0 {
|
||||
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
|
||||
}
|
||||
|
||||
var invalidParentsOrderBlock = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
Version: 0x10000000,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
@@ -231,16 +243,16 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25,
|
||||
0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2,
|
||||
},
|
||||
Timestamp: mstime.UnixMilliseconds(0x5cd18053000),
|
||||
Timestamp: time.Unix(0x5cd18053, 0),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 0x1,
|
||||
},
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
Transactions: []*wire.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
@@ -252,7 +264,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -265,9 +277,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -302,7 +314,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -335,9 +347,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -371,7 +383,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -404,9 +416,9 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -441,7 +453,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -467,10 +479,8 @@ func TestCheckBlockSanity(t *testing.T) {
	if err == nil {
		t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
	}
	var rError RuleError
	if !errors.As(err, &rError) {
		t.Fatalf("CheckBlockSanity: expected a RuleError, but got %s", err)
	} else if rError.ErrorCode != ErrWrongParentsOrder {
	rError := err.(RuleError)
	if rError.ErrorCode != ErrWrongParentsOrder {
		t.Errorf("CheckBlockSanity: Expected error was ErrWrongParentsOrder but got %v", err)
	}
	if delay != 0 {
@@ -479,8 +489,8 @@ func TestCheckBlockSanity(t *testing.T) {

	blockInTheFuture := Block100000
	expectedDelay := 10 * time.Second
	deviationTolerance := time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock
	blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
	now := time.Unix(time.Now().Unix(), 0)
	blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
	delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
	if err != nil {
		t.Errorf("CheckBlockSanity: %v", err)
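The change above derives the allowed future-timestamp window from TimestampDeviationTolerance (a block count) multiplied by TargetTimePerBlock, instead of a fixed number of seconds, so the window scales with the network's block rate. A rough sketch of the delay a too-far-ahead block earns, with hypothetical parameter values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values: 132 blocks of tolerance, one block per second.
	const timestampDeviationTolerance = 132
	targetTimePerBlock := time.Second

	deviationTolerance := time.Duration(timestampDeviationTolerance) * targetTimePerBlock

	now := time.Now()
	blockTime := now.Add(deviationTolerance + 10*time.Second)

	// A block this far ahead is not rejected outright; it is queued with
	// a delay until it falls back inside the tolerance window.
	delay := blockTime.Sub(now.Add(deviationTolerance))
	fmt.Println(delay) // 10s
}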
@@ -499,14 +509,14 @@ func TestPastMedianTime(t *testing.T) {

	for i := 0; i < 100; i++ {
		blockTime = blockTime.Add(time.Second)
		tip = newTestNode(dag, blockSetFromSlice(tip),
		tip = newTestNode(dag, setFromSlice(tip),
			blockVersion,
			0,
			blockTime)
	}

	// Checks that a block is valid if it has timestamp equals to past median time
	node := newTestNode(dag, blockSetFromSlice(tip),
	node := newTestNode(dag, setFromSlice(tip),
		blockVersion,
		dag.powMaxBits,
		tip.PastMedianTime(dag))
@@ -519,7 +529,7 @@ func TestPastMedianTime(t *testing.T) {
	}

	// Checks that a block is valid if its timestamp is after past median time
	node = newTestNode(dag, blockSetFromSlice(tip),
	node = newTestNode(dag, setFromSlice(tip),
		blockVersion,
		dag.powMaxBits,
		tip.PastMedianTime(dag).Add(time.Second))
@@ -532,7 +542,7 @@ func TestPastMedianTime(t *testing.T) {
	}

	// Checks that a block is invalid if its timestamp is before past median time
	node = newTestNode(dag, blockSetFromSlice(tip),
	node = newTestNode(dag, setFromSlice(tip),
		blockVersion,
		0,
		tip.PastMedianTime(dag).Add(-time.Second))
@@ -545,44 +555,45 @@ func TestPastMedianTime(t *testing.T) {
	}

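The test above exercises the past-median-time rule: a block's timestamp must be strictly later than the median timestamp of a recent window of its ancestors. A minimal sketch of that median computation, assuming a window of the last 11 timestamps:

package main

import (
	"fmt"
	"sort"
)

// pastMedianTime returns the median of the given ancestor timestamps
// (unix seconds). Consensus then requires a new block's timestamp to be
// strictly greater than this value.
func pastMedianTime(timestamps []int64) int64 {
	sorted := append([]int64(nil), timestamps...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	window := []int64{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110}
	median := pastMedianTime(window)
	fmt.Println(median)       // 105
	fmt.Println(111 > median) // a later timestamp is valid
	fmt.Println(104 > median) // an earlier one is not
}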
func TestValidateParents(t *testing.T) {
	// Create a new database and dag instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Errorf("Failed to setup dag instance: %v", err)
		return
	dag := newTestDAG(&dagconfig.SimnetParams)
	genesisNode := dag.genesis
	blockVersion := int32(0x10000000)

	blockTime := genesisNode.Header().Timestamp

	generateNode := func(parents ...*blockNode) *blockNode {
		// The timestamp of each block is changed to prevent a situation where two blocks share the same hash
		blockTime = blockTime.Add(time.Second)
		return newTestNode(dag, setFromSlice(parents...),
			blockVersion,
			0,
			blockTime)
	}
	defer teardownFunc()

	a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
	b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
	c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
	a := generateNode(genesisNode)
	b := generateNode(a)
	c := generateNode(genesisNode)

	aNode := nodeByMsgBlock(t, dag, a)
	bNode := nodeByMsgBlock(t, dag, b)
	cNode := nodeByMsgBlock(t, dag, c)

	fakeBlockHeader := &domainmessage.BlockHeader{
	fakeBlockHeader := &wire.BlockHeader{
		HashMerkleRoot:       &daghash.ZeroHash,
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment:       &daghash.ZeroHash,
	}

	// Check direct parents relation
	err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(aNode, bNode))
	err := validateParents(fakeBlockHeader, setFromSlice(a, b))
	if err == nil {
		t.Errorf("validateParents: `a` is a parent of `b`, so an error is expected")
	}

	// Check indirect parents relation
	err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(dag.genesis, bNode))
	err = validateParents(fakeBlockHeader, setFromSlice(genesisNode, b))
	if err == nil {
		t.Errorf("validateParents: `genesis` and `b` are indirectly related, so an error is expected")
	}

	// Check parents with no relation
	err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(bNode, cNode))
	err = validateParents(fakeBlockHeader, setFromSlice(b, c))
	if err != nil {
		t.Errorf("validateParents: unexpected error: %v", err)
	}
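The test checks that a block may not list two parents where one is an ancestor of the other, whether directly or transitively. A minimal sketch of that ancestry check over a toy DAG, assuming a simple parents map rather than real block nodes:

package main

import "fmt"

// isAncestor reports whether a is reachable from b by following parents.
func isAncestor(a, b string, parents map[string][]string) bool {
	for _, p := range parents[b] {
		if p == a || isAncestor(a, p, parents) {
			return true
		}
	}
	return false
}

// validateParents rejects a parent set in which any parent is an
// ancestor of another parent.
func validateParents(set []string, parents map[string][]string) error {
	for _, x := range set {
		for _, y := range set {
			if x != y && isAncestor(x, y, parents) {
				return fmt.Errorf("%s is an ancestor of %s", x, y)
			}
		}
	}
	return nil
}

func main() {
	// genesis <- a <- b, and genesis <- c
	parents := map[string][]string{"a": {"genesis"}, "b": {"a"}, "c": {"genesis"}}

	fmt.Println(validateParents([]string{"a", "b"}, parents))       // error: direct relation
	fmt.Println(validateParents([]string{"genesis", "b"}, parents)) // error: indirect relation
	fmt.Println(validateParents([]string{"b", "c"}, parents))       // <nil>: unrelated
}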
@@ -596,12 +607,13 @@ func TestCheckTransactionSanity(t *testing.T) {
		outputValue            uint64
		nodeSubnetworkID       subnetworkid.SubnetworkID
		txSubnetworkData       *txSubnetworkData
		extraModificationsFunc func(*domainmessage.MsgTx)
		extraModificationsFunc func(*wire.MsgTx)
		expectedErr            error
	}{
		{"good one", 1, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
		{"no inputs", 0, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrNoTxInputs, "")},
		{"no outputs", 1, 0, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
		{"too massive", 1, 1000000, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrTxMassTooHigh, "")},
		{"too much sompi in one output", 1, 1, util.MaxSompi + 1,
			*subnetworkid.SubnetworkIDNative,
			nil,
@@ -615,7 +627,7 @@ func TestCheckTransactionSanity(t *testing.T) {
		{"duplicate inputs", 2, 1, 1,
			*subnetworkid.SubnetworkIDNative,
			nil,
			func(tx *domainmessage.MsgTx) { tx.TxIn[1].PreviousOutpoint.Index = 0 },
			func(tx *wire.MsgTx) { tx.TxIn[1].PreviousOutpoint.Index = 0 },
			ruleError(ErrDuplicateTxInputs, "")},
		{"1 input coinbase",
			1,
@@ -669,14 +681,14 @@ func TestCheckTransactionSanity(t *testing.T) {
		{"invalid payload hash", 1, 1, 0,
			subnetworkid.SubnetworkID{123},
			&txSubnetworkData{&subnetworkid.SubnetworkID{123}, 0, []byte{1}},
			func(tx *domainmessage.MsgTx) {
			func(tx *wire.MsgTx) {
				tx.PayloadHash = &daghash.Hash{}
			},
			ruleError(ErrInvalidPayloadHash, "")},
		{"invalid payload hash in native subnetwork", 1, 1, 0,
			*subnetworkid.SubnetworkIDNative,
			nil,
			func(tx *domainmessage.MsgTx) {
			func(tx *wire.MsgTx) {
				tx.PayloadHash = daghash.DoubleHashP(tx.Payload)
			},
			ruleError(ErrInvalidPayloadHash, "")},
@@ -699,8 +711,8 @@ func TestCheckTransactionSanity(t *testing.T) {
|
||||
|
||||
// Block100000 defines block 100,000 of the block DAG. It is used to
|
||||
// test Block operations.
|
||||
var Block100000 = domainmessage.MsgBlock{
|
||||
Header: domainmessage.BlockHeader{
|
||||
var Block100000 = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
Version: 0x10000000,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
@@ -729,16 +741,16 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
0x3c, 0xb1, 0x16, 0x8f, 0x5f, 0x6b, 0x45, 0x87,
|
||||
},
|
||||
UTXOCommitment: &daghash.ZeroHash,
|
||||
Timestamp: mstime.UnixMilliseconds(0x17305aa654a),
|
||||
Timestamp: time.Unix(0x5cdac4b1, 0),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 1,
|
||||
Nonce: 0x00000001,
|
||||
},
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
Transactions: []*wire.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50,
|
||||
0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33,
|
||||
@@ -751,7 +763,7 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -773,9 +785,9 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
|
||||
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
|
||||
@@ -787,7 +799,7 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
|
||||
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
|
||||
@@ -803,9 +815,9 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -840,7 +852,7 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -873,9 +885,9 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -909,7 +921,7 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -942,9 +954,9 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -979,7 +991,7 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1001,8 +1013,8 @@ var Block100000 = domainmessage.MsgBlock{
|
||||
}
|
||||
|
||||
// BlockWithWrongTxOrder defines invalid block 100,000 of the block DAG.
|
||||
var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Header: domainmessage.BlockHeader{
|
||||
var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
Version: 1,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
@@ -1036,16 +1048,16 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90,
|
||||
0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e,
|
||||
},
|
||||
Timestamp: mstime.UnixMilliseconds(0x5cd16eaa000),
|
||||
Timestamp: time.Unix(0x5cd16eaa, 0),
|
||||
Bits: 0x207fffff,
|
||||
Nonce: 1,
|
||||
Nonce: 0x0,
|
||||
},
|
||||
Transactions: []*domainmessage.MsgTx{
|
||||
Transactions: []*wire.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50,
|
||||
0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33,
|
||||
@@ -1058,7 +1070,7 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1080,9 +1092,9 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
|
||||
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
|
||||
@@ -1094,7 +1106,7 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b,
|
||||
0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87,
|
||||
@@ -1110,9 +1122,9 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
@@ -1147,7 +1159,7 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1182,9 +1194,9 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
@@ -1218,7 +1230,7 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
@@ -1251,9 +1263,9 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*domainmessage.TxIn{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutpoint: domainmessage.Outpoint{
|
||||
PreviousOutpoint: wire.Outpoint{
|
||||
TxID: daghash.TxID([32]byte{
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
@@ -1288,7 +1300,7 @@ var BlockWithWrongTxOrder = domainmessage.MsgBlock{
|
||||
Sequence: math.MaxUint64,
|
||||
},
|
||||
},
|
||||
TxOut: []*domainmessage.TxOut{
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
ScriptPubKey: []byte{
|
||||
|
||||
@@ -78,7 +78,7 @@ func (c bitConditionChecker) EndTime() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
	return c.dag.Params.RuleChangeActivationThreshold
	return c.dag.dagParams.RuleChangeActivationThreshold
}

// MinerConfirmationWindow is the number of blocks in each threshold state
@@ -89,7 +89,7 @@ func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
	return c.dag.Params.MinerConfirmationWindow
	return c.dag.dagParams.MinerConfirmationWindow
}

// Condition returns true when the specific bit associated with the checker is
@@ -159,7 +159,7 @@ func (c deploymentChecker) EndTime() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
	return c.dag.Params.RuleChangeActivationThreshold
	return c.dag.dagParams.RuleChangeActivationThreshold
}

// MinerConfirmationWindow is the number of blocks in each threshold state
@@ -170,7 +170,7 @@ func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
	return c.dag.Params.MinerConfirmationWindow
	return c.dag.dagParams.MinerConfirmationWindow
}

// Condition returns true when the specific bit defined by the deployment
@@ -198,8 +198,8 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
	// that is either in the process of being voted on, or locked in for the
	// activation at the next threshold window change.
	expectedVersion := uint32(vbTopBits)
	for id := 0; id < len(dag.Params.Deployments); id++ {
		deployment := &dag.Params.Deployments[id]
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, dag: dag}
		state, err := dag.thresholdState(prevNode, checker, cache)

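For context, calcNextBlockVersion starts from a version template and ORs in the bit of every deployment that is still being voted on. A toy sketch of that bit assembly, where the template value and the deployment bit positions are assumptions for illustration only:

package main

import "fmt"

const vbTopBits = 0x10000000 // hypothetical version template

func main() {
	// Bits for deployments still being voted on (hypothetical positions).
	votingDeploymentBits := []uint32{1 << 0, 1 << 2}

	expectedVersion := uint32(vbTopBits)
	for _, bit := range votingDeploymentBits {
		expectedVersion |= bit
	}
	fmt.Printf("%#x\n", expectedVersion) // 0x10000005
}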
@@ -32,13 +32,22 @@ func newVirtualBlock(dag *BlockDAG, tips blockSet) *virtualBlock {
	var virtual virtualBlock
	virtual.dag = dag
	virtual.utxoSet = NewFullUTXOSet()
	virtual.selectedParentChainSet = newBlockSet()
	virtual.selectedParentChainSet = newSet()
	virtual.selectedParentChainSlice = nil
	virtual.setTips(tips)

	return &virtual
}

// clone creates and returns a clone of the virtual block.
func (v *virtualBlock) clone() *virtualBlock {
	return &virtualBlock{
		utxoSet:                v.utxoSet,
		blockNode:              v.blockNode,
		selectedParentChainSet: v.selectedParentChainSet,
	}
}

// setTips replaces the tips of the virtual block with the blocks in the
// given blockSet. This only differs from the exported version in that it
// is up to the caller to ensure the lock is held.
@@ -113,8 +122,8 @@ func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *ch
// This function is safe for concurrent access.
func (v *virtualBlock) SetTips(tips blockSet) {
	v.mtx.Lock()
	defer v.mtx.Unlock()
	v.setTips(tips)
	v.mtx.Unlock()
}

// addTip adds the given tip to the set of tips in the virtual block.

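Note the SetTips hunk: one side unlocks with defer, the other with an explicit Unlock after the call. The defer form releases the mutex even if the guarded call panics, which matters for code that recovers from panics elsewhere. A small sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mtx sync.Mutex
	n   int
}

// With defer, the mutex is released even if the guarded section panics,
// so a recovered panic cannot leave the lock held.
func (c *counter) incr() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.n++
}

func main() {
	c := &counter{}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.incr()
		}()
	}
	wg.Wait()
	fmt.Println(c.n) // 100
}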
@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	params := dagconfig.SimnetParams
	params.K = 1
	dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
	dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
		DAGParams: &params,
	})
	if err != nil {
@@ -52,12 +52,12 @@ func TestVirtualBlock(t *testing.T) {
	// \ X
	// <- 4 <- 6
	node0 := dag.genesis
	node1 := buildNode(t, dag, blockSetFromSlice(node0))
	node2 := buildNode(t, dag, blockSetFromSlice(node1))
	node3 := buildNode(t, dag, blockSetFromSlice(node0))
	node4 := buildNode(t, dag, blockSetFromSlice(node0))
	node5 := buildNode(t, dag, blockSetFromSlice(node3, node4))
	node6 := buildNode(t, dag, blockSetFromSlice(node3, node4))
	node1 := buildNode(t, dag, setFromSlice(node0))
	node2 := buildNode(t, dag, setFromSlice(node1))
	node3 := buildNode(t, dag, setFromSlice(node0))
	node4 := buildNode(t, dag, setFromSlice(node0))
	node5 := buildNode(t, dag, setFromSlice(node3, node4))
	node6 := buildNode(t, dag, setFromSlice(node3, node4))

// Given an empty VirtualBlock, each of the following test cases will:
|
||||
// Set its tips to tipsToSet
|
||||
@@ -75,28 +75,28 @@ func TestVirtualBlock(t *testing.T) {
|
||||
name: "empty virtual",
|
||||
tipsToSet: []*blockNode{},
|
||||
tipsToAdd: []*blockNode{},
|
||||
expectedTips: newBlockSet(),
|
||||
expectedTips: newSet(),
|
||||
expectedSelectedParent: nil,
|
||||
},
|
||||
{
|
||||
name: "virtual with genesis tip",
|
||||
tipsToSet: []*blockNode{node0},
|
||||
tipsToAdd: []*blockNode{},
|
||||
expectedTips: blockSetFromSlice(node0),
|
||||
expectedTips: setFromSlice(node0),
|
||||
expectedSelectedParent: node0,
|
||||
},
|
||||
{
|
||||
name: "virtual with genesis tip, add child of genesis",
|
||||
tipsToSet: []*blockNode{node0},
|
||||
tipsToAdd: []*blockNode{node1},
|
||||
expectedTips: blockSetFromSlice(node1),
|
||||
expectedTips: setFromSlice(node1),
|
||||
expectedSelectedParent: node1,
|
||||
},
|
||||
{
|
||||
name: "empty virtual, add a full DAG",
|
||||
tipsToSet: []*blockNode{},
|
||||
tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6},
|
||||
expectedTips: blockSetFromSlice(node2, node5, node6),
|
||||
expectedTips: setFromSlice(node2, node5, node6),
|
||||
expectedSelectedParent: node5,
|
||||
},
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func TestVirtualBlock(t *testing.T) {
|
||||
virtual := newVirtualBlock(dag, nil)
|
||||
|
||||
// Set the tips. This will be the initial state
|
||||
virtual.SetTips(blockSetFromSlice(test.tipsToSet...))
|
||||
virtual.SetTips(setFromSlice(test.tipsToSet...))
|
||||
|
||||
// Add all blockNodes in tipsToAdd in order
|
||||
for _, tipToAdd := range test.tipsToAdd {
|
||||
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -147,9 +147,9 @@ func TestSelectedPath(t *testing.T) {
|
||||
|
||||
tip := dag.genesis
|
||||
virtual.AddTip(tip)
|
||||
initialPath := blockSetFromSlice(tip)
|
||||
initialPath := setFromSlice(tip)
|
||||
for i := 0; i < 5; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
tip = buildNode(t, dag, setFromSlice(tip))
|
||||
initialPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
|
||||
firstPath := initialPath.clone()
|
||||
for i := 0; i < 5; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
tip = buildNode(t, dag, setFromSlice(tip))
|
||||
firstPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
@@ -175,7 +175,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
secondPath := initialPath.clone()
|
||||
tip = initialTip
|
||||
for i := 0; i < 100; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
tip = buildNode(t, dag, setFromSlice(tip))
|
||||
secondPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
@@ -193,7 +193,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
|
||||
tip = initialTip
|
||||
for i := 0; i < 3; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
tip = buildNode(t, dag, setFromSlice(tip))
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
// Because we added a very short chain, the selected path should not be affected.
|
||||
@@ -215,14 +215,14 @@ func TestSelectedPath(t *testing.T) {
|
||||
t.Fatalf("updateSelectedParentSet didn't panic")
|
||||
}
|
||||
}()
|
||||
virtual2.updateSelectedParentSet(buildNode(t, dag, blockSetFromSlice()))
|
||||
virtual2.updateSelectedParentSet(buildNode(t, dag, setFromSlice()))
|
||||
}
|
||||
|
||||
func TestChainUpdates(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -236,23 +236,23 @@ func TestChainUpdates(t *testing.T) {
|
||||
var toBeRemovedNodes []*blockNode
|
||||
toBeRemovedTip := genesis
|
||||
for i := 0; i < 5; i++ {
|
||||
toBeRemovedTip = buildNode(t, dag, blockSetFromSlice(toBeRemovedTip))
|
||||
toBeRemovedTip = buildNode(t, dag, setFromSlice(toBeRemovedTip))
|
||||
toBeRemovedNodes = append(toBeRemovedNodes, toBeRemovedTip)
|
||||
}
|
||||
|
||||
// Create a VirtualBlock with the toBeRemoved chain
|
||||
virtual := newVirtualBlock(dag, blockSetFromSlice(toBeRemovedNodes...))
|
||||
virtual := newVirtualBlock(dag, setFromSlice(toBeRemovedNodes...))
|
||||
|
||||
// Create a chain to be added
|
||||
var toBeAddedNodes []*blockNode
|
||||
toBeAddedTip := genesis
|
||||
for i := 0; i < 8; i++ {
|
||||
toBeAddedTip = buildNode(t, dag, blockSetFromSlice(toBeAddedTip))
|
||||
toBeAddedTip = buildNode(t, dag, setFromSlice(toBeAddedTip))
|
||||
toBeAddedNodes = append(toBeAddedNodes, toBeAddedTip)
|
||||
}
|
||||
|
||||
// Set the virtual tip to be the tip of the toBeAdded chain
|
||||
chainUpdates := virtual.setTips(blockSetFromSlice(toBeAddedTip))
|
||||
chainUpdates := virtual.setTips(setFromSlice(toBeAddedTip))
|
||||
|
||||
// Make sure that the removed blocks are as expected (in reverse order)
|
||||
if len(chainUpdates.removedChainBlockHashes) != len(toBeRemovedNodes) {
|
||||
@@ -6,24 +6,58 @@ package main

import (
	"os"
	"path/filepath"
	"runtime"

	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/limits"
	"github.com/kaspanet/kaspad/logs"
	"github.com/kaspanet/kaspad/util/panics"
)

const (
	// blockDBNamePrefix is the prefix for the kaspad block database.
	blockDBNamePrefix = "blocks"
	// blockDbNamePrefix is the prefix for the kaspad block database.
	blockDbNamePrefix = "blocks"
)

var (
	cfg   *ConfigFlags
	log   *logs.Logger
	spawn func(string, func())
	log   logs.Logger
	spawn func(func())
)

// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
	// The database name is based on the database type.
	dbName := blockDbNamePrefix + "_" + cfg.DbType
	dbPath := filepath.Join(cfg.DataDir, dbName)

	log.Infof("Loading block database from '%s'", dbPath)
	db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
	if err != nil {
		// Return the error if it's not because the database doesn't
		// exist.
		if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
			database.ErrDbDoesNotExist {

			return nil, err
		}

		// Create the db if it does not exist.
		err = os.MkdirAll(cfg.DataDir, 0700)
		if err != nil {
			return nil, err
		}
		db, err = database.Create(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
		if err != nil {
			return nil, err
		}
	}

	log.Info("Block database loaded")
	return db, nil
}

// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
@@ -40,6 +74,14 @@ func realMain() error {
	log = backendLogger.Logger("MAIN")
	spawn = panics.GoroutineWrapperFunc(log)

	// Load the block database.
	db, err := loadBlockDB()
	if err != nil {
		log.Errorf("Failed to load database: %s", err)
		return err
	}
	defer db.Close()

	fi, err := os.Open(cfg.InFile)
	if err != nil {
		log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
@@ -50,7 +92,7 @@ func realMain() error {
	// Create a block importer for the database and input file and start it.
	// The done channel returned from start will contain an error if
	// anything went wrong.
	importer, err := newBlockImporter(fi)
	importer, err := newBlockImporter(db, fi)
	if err != nil {
		log.Errorf("Failed to create block importer: %s", err)
		return err

@@ -6,15 +6,20 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDbType = "ffldb"
|
||||
defaultDataFile = "bootstrap.dat"
|
||||
defaultProgress = 10
|
||||
)
|
||||
@@ -22,6 +27,7 @@ const (
|
||||
var (
|
||||
kaspadHomeDir = util.AppDataDir("kaspad", false)
|
||||
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
activeConfig *ConfigFlags
|
||||
)
|
||||
|
||||
@@ -34,10 +40,12 @@ func ActiveConfig() *ConfigFlags {
|
||||
//
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type ConfigFlags struct {
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
||||
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
|
||||
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
||||
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
|
||||
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
|
||||
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
@@ -51,11 +59,23 @@ func fileExists(name string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// loadConfig initializes and parses the config using command line options.
|
||||
func loadConfig() (*ConfigFlags, []string, error) {
|
||||
// Default config.
|
||||
activeConfig = &ConfigFlags{
|
||||
DataDir: defaultDataDir,
|
||||
DbType: defaultDbType,
|
||||
InFile: defaultDataFile,
|
||||
Progress: defaultProgress,
|
||||
}
|
||||
@@ -64,8 +84,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
|
||||
parser := flags.NewParser(&activeConfig, flags.Default)
|
||||
remainingArgs, err := parser.Parse()
|
||||
if err != nil {
|
||||
var flagsErr *flags.Error
|
||||
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
|
||||
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
|
||||
parser.WriteHelp(os.Stderr)
|
||||
}
|
||||
return nil, nil, err
|
||||
@@ -76,6 +95,16 @@ func loadConfig() (*ConfigFlags, []string, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Validate database type.
|
||||
if !validDbType(activeConfig.DbType) {
|
||||
str := "%s: The specified database type [%s] is invalid -- " +
|
||||
"supported types %s"
|
||||
err := errors.Errorf(str, "loadConfig", activeConfig.DbType, strings.Join(knownDbTypes, ", "))
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
parser.WriteHelp(os.Stderr)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Append the network type to the data directory so it is "namespaced"
|
||||
// per network. In addition to the block database, there are other
|
||||
// pieces of data that are saved to disk such as address manager state.
|
||||
|
||||
@@ -6,16 +6,16 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/domainmessage"
|
||||
"github.com/kaspanet/kaspad/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// importResults houses the stats and result as an import operation.
|
||||
@@ -28,6 +28,7 @@ type importResults struct {
|
||||
// blockImporter houses information about an ongoing import from a block data
|
||||
// file to the block database.
|
||||
type blockImporter struct {
|
||||
db database.DB
|
||||
dag *blockdag.BlockDAG
|
||||
r io.ReadSeeker
|
||||
processQueue chan []byte
|
||||
@@ -40,8 +41,8 @@ type blockImporter struct {
|
||||
receivedLogBlocks int64
|
||||
receivedLogTx int64
|
||||
lastHeight int64
|
||||
lastBlockTime mstime.Time
|
||||
lastLogTime mstime.Time
|
||||
lastBlockTime time.Time
|
||||
lastLogTime time.Time
|
||||
}
|
||||
|
||||
// readBlock reads the next block from the input file.
|
||||
@@ -68,10 +69,10 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
|
||||
if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blockLen > domainmessage.MaxMessagePayload {
|
||||
if blockLen > wire.MaxMessagePayload {
|
||||
return nil, errors.Errorf("block payload of %d bytes is larger "+
|
||||
"than the max allowed %d bytes", blockLen,
|
||||
domainmessage.MaxMessagePayload)
|
||||
wire.MaxMessagePayload)
|
||||
}
|
||||
|
||||
serializedBlock := make([]byte, blockLen)
|
||||
@@ -100,14 +101,14 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
||||
|
||||
// Skip blocks that already exist.
|
||||
blockHash := block.Hash()
|
||||
if bi.dag.IsKnownBlock(blockHash) {
|
||||
if bi.dag.HaveBlock(blockHash) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Don't bother trying to process orphans.
|
||||
parentHashes := block.MsgBlock().Header.ParentHashes
|
||||
if len(parentHashes) > 0 {
|
||||
if !bi.dag.AreKnownBlocks(parentHashes) {
|
||||
if !bi.dag.HaveBlocks(parentHashes) {
|
||||
return false, errors.Errorf("import file contains block "+
|
||||
"%v which does not link to the available "+
|
||||
"block DAG", parentHashes)
|
||||
@@ -171,7 +172,7 @@ out:
|
||||
func (bi *blockImporter) logProgress() {
|
||||
bi.receivedLogBlocks++
|
||||
|
||||
now := mstime.Now()
|
||||
now := time.Now()
|
||||
duration := now.Sub(bi.lastLogTime)
|
||||
if duration < time.Second*time.Duration(cfg.Progress) {
|
||||
return
|
||||
@@ -265,12 +266,12 @@ func (bi *blockImporter) Import() chan *importResults {
	// Start up the read and process handling goroutines. This setup allows
	// blocks to be read from disk in parallel while being processed.
	bi.wg.Add(2)
	spawn("blockImporter.readHandler", bi.readHandler)
	spawn("blockImporter.processHandler", bi.processHandler)
	spawn(bi.readHandler)
	spawn(bi.processHandler)

	// Wait for the import to finish in a separate goroutine and signal
	// the status handler when done.
	spawn("blockImporter.sendToDoneChan", func() {
	spawn(func() {
		bi.wg.Wait()
		bi.doneChan <- true
	})
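The hunk above gives each spawned goroutine a name, which the panic-wrapping spawn function (built from util/panics, as the imports in this diff show) can log if the goroutine crashes. A minimal sketch of a named spawn wrapper of that shape; the exact signature of kaspad's wrapper is not shown here, so this is illustrative only:

package main

import (
	"fmt"
	"sync"
)

// spawn runs f on a new goroutine and reports the given name if f panics.
func spawn(name string, f func()) *sync.WaitGroup {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer func() {
			if r := recover(); r != nil {
				fmt.Printf("goroutine %s panicked: %v\n", name, r)
			}
		}()
		f()
	}()
	return &wg
}

func main() {
	wg := spawn("example.worker", func() { panic("boom") })
	wg.Wait() // prints: goroutine example.worker panicked: boom
}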
@@ -278,7 +279,7 @@ func (bi *blockImporter) Import() chan *importResults {
|
||||
// Start the status handler and return the result channel that it will
|
||||
// send the results on when the import is done.
|
||||
resultChan := make(chan *importResults)
|
||||
spawn("blockImporter.statusHandler", func() {
|
||||
spawn(func() {
|
||||
bi.statusHandler(resultChan)
|
||||
})
|
||||
return resultChan
|
||||
@@ -286,12 +287,29 @@ func (bi *blockImporter) Import() chan *importResults {
|
||||
|
||||
// newBlockImporter returns a new importer for the provided file reader seeker
|
||||
// and database.
|
||||
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
|
||||
// Create the acceptance index if needed.
|
||||
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
// Create the transaction and address indexes if needed.
|
||||
//
|
||||
// CAUTION: the txindex needs to be first in the indexes array because
|
||||
// the addrindex uses data from the txindex during catchup. If the
|
||||
// addrindex is run first, it may not have the transactions from the
|
||||
// current block indexed.
|
||||
var indexes []indexers.Indexer
|
||||
if cfg.AcceptanceIndex {
|
||||
log.Info("Acceptance index is enabled")
|
||||
indexes = append(indexes, indexers.NewAcceptanceIndex())
|
||||
if cfg.TxIndex || cfg.AddrIndex {
|
||||
// Enable transaction index if address index is enabled since it
|
||||
// requires it.
|
||||
if !cfg.TxIndex {
|
||||
log.Infof("Transaction index enabled because it is " +
|
||||
"required by the address index")
|
||||
cfg.TxIndex = true
|
||||
} else {
|
||||
log.Info("Transaction index is enabled")
|
||||
}
|
||||
indexes = append(indexes, indexers.NewTxIndex())
|
||||
}
|
||||
if cfg.AddrIndex {
|
||||
log.Info("Address index is enabled")
|
||||
indexes = append(indexes, indexers.NewAddrIndex(ActiveConfig().NetParams()))
|
||||
}
|
||||
|
||||
// Create an index manager if any of the optional indexes are enabled.
|
||||
@@ -301,8 +319,9 @@ func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
DB: db,
|
||||
DAGParams: ActiveConfig().NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
TimeSource: blockdag.NewMedianTime(),
|
||||
IndexManager: indexManager,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -310,12 +329,13 @@ func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
return &blockImporter{
|
||||
db: db,
|
||||
r: r,
|
||||
processQueue: make(chan []byte, 2),
|
||||
doneChan: make(chan bool),
|
||||
errChan: make(chan error),
|
||||
quit: make(chan struct{}),
|
||||
dag: dag,
|
||||
lastLogTime: mstime.Now(),
|
||||
lastLogTime: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
cmd/addsubnetwork/addsubnetwork.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package main

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/rpcclient"
	"github.com/kaspanet/kaspad/rpcmodel"
	"github.com/kaspanet/kaspad/util/subnetworkid"
	"github.com/pkg/errors"
	"time"
)

const (
	getSubnetworkRetryDelay = 5 * time.Second
	maxGetSubnetworkRetries = 12
)

func main() {
	cfg, err := parseConfig()
	if err != nil {
		panic(errors.Errorf("error parsing command-line arguments: %s", err))
	}

	privateKey, addrPubKeyHash, err := decodeKeys(cfg)
	if err != nil {
		panic(errors.Errorf("error decoding public key: %s", err))
	}

	client, err := connect(cfg)
	if err != nil {
		panic(errors.Errorf("could not connect to RPC server: %s", err))
	}
	log.Infof("Connected to server %s", cfg.RPCServer)

	fundingOutpoint, fundingTx, err := findUnspentTXO(cfg, client, addrPubKeyHash)
	if err != nil {
		panic(errors.Errorf("error finding unspent transactions: %s", err))
	}
	if fundingOutpoint == nil || fundingTx == nil {
		panic(errors.Errorf("could not find any unspent transactions for this key"))
	}
	log.Infof("Found transaction to spend: %s:%d", fundingOutpoint.TxID, fundingOutpoint.Index)

	registryTx, err := buildSubnetworkRegistryTx(cfg, fundingOutpoint, fundingTx, privateKey)
	if err != nil {
		panic(errors.Errorf("error building subnetwork registry tx: %s", err))
	}

	_, err = client.SendRawTransaction(registryTx, true)
	if err != nil {
		panic(errors.Errorf("failed sending subnetwork registry tx: %s", err))
	}
	log.Infof("Successfully sent subnetwork registry transaction")

	subnetworkID, err := blockdag.TxToSubnetworkID(registryTx)
	if err != nil {
		panic(errors.Errorf("could not build subnetwork ID: %s", err))
	}

	err = waitForSubnetworkToBecomeAccepted(client, subnetworkID)
	if err != nil {
		panic(errors.Errorf("error waiting for subnetwork to become accepted: %s", err))
	}
	log.Infof("Subnetwork '%s' was successfully registered.", subnetworkID)
}

func waitForSubnetworkToBecomeAccepted(client *rpcclient.Client, subnetworkID *subnetworkid.SubnetworkID) error {
	retries := 0
	for {
		_, err := client.GetSubnetwork(subnetworkID.String())
		if err != nil {
			if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCSubnetworkNotFound {
				log.Infof("Subnetwork not found")

				retries++
				if retries == maxGetSubnetworkRetries {
					return errors.Errorf("failed to get subnetwork %d times: %s", maxGetSubnetworkRetries, err)
				}

				log.Infof("Waiting %d seconds...", int(getSubnetworkRetryDelay.Seconds()))
				<-time.After(getSubnetworkRetryDelay)
				continue
			}
			return errors.Errorf("failed getting subnetwork: %s", err)
		}
		return nil
	}
}
cmd/addsubnetwork/config.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package main

import (
	"github.com/jessevdk/go-flags"
	"github.com/kaspanet/kaspad/config"
	"github.com/pkg/errors"
)

var activeConfig *ConfigFlags

// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
	return activeConfig
}

// ConfigFlags holds the configurations set by the command line argument
type ConfigFlags struct {
	PrivateKey    string `short:"k" long:"private-key" description:"Private key" required:"true"`
	RPCUser       string `short:"u" long:"rpcuser" description:"RPC username" required:"true"`
	RPCPassword   string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password" required:"true"`
	RPCServer     string `short:"s" long:"rpcserver" description:"RPC server to connect to" required:"true"`
	RPCCert       string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
	DisableTLS    bool   `long:"notls" description:"Disable TLS"`
	GasLimit      uint64 `long:"gaslimit" description:"The gas limit of the new subnetwork"`
	RegistryTxFee uint64 `long:"regtxfee" description:"The fee for the subnetwork registry transaction"`
	config.NetworkFlags
}

const (
	defaultSubnetworkGasLimit = 1000
	defaultRegistryTxFee      = 3000
)

func parseConfig() (*ConfigFlags, error) {
	activeConfig = &ConfigFlags{}
	parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()

	if err != nil {
		return nil, err
	}

	if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
		return nil, errors.New("--rpccert is required unless --notls is used")
	}

	if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
		return nil, errors.New("--rpccert should be omitted if --notls is used")
	}

	err = activeConfig.ResolveNetwork(parser)
	if err != nil {
		return nil, err
	}

	if activeConfig.GasLimit == 0 {
		activeConfig.GasLimit = defaultSubnetworkGasLimit
	}

	if activeConfig.RegistryTxFee == 0 {
		activeConfig.RegistryTxFee = defaultRegistryTxFee
	}

	return activeConfig, nil
}
cmd/addsubnetwork/connect.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package main

import (
	"io/ioutil"

	"github.com/kaspanet/kaspad/rpcclient"
	"github.com/pkg/errors"
)

func connect(cfg *ConfigFlags) (*rpcclient.Client, error) {
	var cert []byte
	if !cfg.DisableTLS {
		var err error
		cert, err = ioutil.ReadFile(cfg.RPCCert)
		if err != nil {
			return nil, errors.Errorf("error reading certificates file: %s", err)
		}
	}

	connCfg := &rpcclient.ConnConfig{
		Host:       cfg.RPCServer,
		Endpoint:   "ws",
		User:       cfg.RPCUser,
		Pass:       cfg.RPCPassword,
		DisableTLS: cfg.DisableTLS,
	}

	if !cfg.DisableTLS {
		connCfg.Certificates = cert
	}

	client, err := rpcclient.New(connCfg, nil)
	if err != nil {
		return nil, errors.Errorf("error connecting to address %s: %s", cfg.RPCServer, err)
	}

	return client, nil
}
cmd/addsubnetwork/keys.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package main

import (
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/base58"
)

func decodeKeys(cfg *ConfigFlags) (*ecc.PrivateKey, *util.AddressPubKeyHash, error) {
	privateKeyBytes := base58.Decode(cfg.PrivateKey)
	privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
	// The compressed serialization of the public key derived from the private key.
	serializedPublicKey := privateKey.PubKey().SerializeCompressed()

	addr, err := util.NewAddressPubKeyHashFromPublicKey(serializedPublicKey, ActiveConfig().NetParams().Prefix)
	if err != nil {
		return nil, nil, err
	}
	return privateKey, addr, nil
}
cmd/addsubnetwork/log.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package main

import (
	"github.com/kaspanet/kaspad/logs"
)

var (
	backendLog = logs.NewBackend()
	log        = backendLog.Logger("ASUB")
)
cmd/addsubnetwork/registrytx.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package main

import (
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
)

func buildSubnetworkRegistryTx(cfg *ConfigFlags, fundingOutpoint *wire.Outpoint, fundingTx *wire.MsgTx, privateKey *ecc.PrivateKey) (*wire.MsgTx, error) {
	txIn := &wire.TxIn{
		PreviousOutpoint: *fundingOutpoint,
		Sequence:         wire.MaxTxInSequenceNum,
	}
	txOut := &wire.TxOut{
		ScriptPubKey: fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
		Value:        fundingTx.TxOut[fundingOutpoint.Index].Value - cfg.RegistryTxFee,
	}
	registryTx := wire.NewRegistryMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}, cfg.GasLimit)

	signatureScript, err := txscript.SignatureScript(registryTx, 0, fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
		txscript.SigHashAll, privateKey, true)
	if err != nil {
		return nil, errors.Errorf("failed to build signature script: %s", err)
	}
	txIn.SignatureScript = signatureScript

	return registryTx, nil
}
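The registry transaction's change output is simply the funding output minus the flat registry fee, with no explicit change address (everything returns to the funding script). A tiny worked check of that arithmetic, with hypothetical values in sompi:

package main

import "fmt"

func main() {
	// Hypothetical funding output and the default registry fee, in sompi.
	fundingValue := uint64(100000)
	registryTxFee := uint64(3000)

	changeValue := fundingValue - registryTxFee
	fmt.Println(changeValue) // 97000: everything minus the flat fee goes back
}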