Merge remote-tracking branch 'origin/v0.6.0-dev'

This commit is contained in:
Mike Zak 2020-08-09 09:09:27 +03:00
commit 61f383a713
438 changed files with 15376 additions and 24686 deletions

File diff suppressed because it is too large Load Diff

View File

@ -2,20 +2,21 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr package addressmanager
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"io/ioutil" "io/ioutil"
"net" "net"
"reflect" "reflect"
"testing" "testing"
"time" "time"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
@ -25,7 +26,7 @@ import (
// method. // method.
type naTest struct { type naTest struct {
in wire.NetAddress in wire.NetAddress
want string want AddressKey
} }
// naTests houses all of the tests to be performed against the NetAddressKey // naTests houses all of the tests to be performed against the NetAddressKey
@ -96,7 +97,7 @@ func addNaTests() {
addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336") addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
} }
func addNaTest(ip string, port uint16, want string) { func addNaTest(ip string, port uint16, want AddressKey) {
nip := net.ParseIP(ip) nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork) na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork)
test := naTest{na, want} test := naTest{na, want}
@ -108,22 +109,25 @@ func lookupFuncForTest(host string) ([]net.IP, error) {
} }
func newAddrManagerForTest(t *testing.T, testName string, func newAddrManagerForTest(t *testing.T, testName string,
localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddrManager, teardown func()) { localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddressManager, teardown func()) {
cfg := config.DefaultConfig()
cfg.SubnetworkID = localSubnetworkID
dbPath, err := ioutil.TempDir("", testName) dbPath, err := ioutil.TempDir("", testName)
if err != nil { if err != nil {
t.Fatalf("Error creating temporary directory: %s", err) t.Fatalf("Error creating temporary directory: %s", err)
} }
err = dbaccess.Open(dbPath) databaseContext, err := dbaccess.New(dbPath)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
addressManager = New(lookupFuncForTest, localSubnetworkID) addressManager = New(cfg, databaseContext)
return addressManager, func() { return addressManager, func() {
err := dbaccess.Close() err := databaseContext.Close()
if err != nil { if err != nil {
t.Fatalf("error closing the database: %s", err) t.Fatalf("error closing the database: %s", err)
} }
@ -144,15 +148,6 @@ func TestStartStop(t *testing.T) {
} }
func TestAddAddressByIP(t *testing.T) { func TestAddAddressByIP(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
fmtErr := errors.Errorf("") fmtErr := errors.Errorf("")
addrErr := &net.AddrError{} addrErr := &net.AddrError{}
var tests = []struct { var tests = []struct {
@ -198,15 +193,6 @@ func TestAddAddressByIP(t *testing.T) {
} }
func TestAddLocalAddress(t *testing.T) { func TestAddLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
var tests = []struct { var tests = []struct {
address wire.NetAddress address wire.NetAddress
priority AddressPriority priority AddressPriority
@ -261,15 +247,6 @@ func TestAddLocalAddress(t *testing.T) {
} }
func TestAttempt(t *testing.T) { func TestAttempt(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil) amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil)
defer teardown() defer teardown()
@ -293,15 +270,6 @@ func TestAttempt(t *testing.T) {
} }
func TestConnected(t *testing.T) { func TestConnected(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil) amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil)
defer teardown() defer teardown()
@ -313,7 +281,7 @@ func TestConnected(t *testing.T) {
ka := amgr.GetAddress() ka := amgr.GetAddress()
na := ka.NetAddress() na := ka.NetAddress()
// make it an hour ago // make it an hour ago
na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0) na.Timestamp = mstime.Now().Add(time.Hour * -1)
amgr.Connected(na) amgr.Connected(na)
@ -323,15 +291,6 @@ func TestConnected(t *testing.T) {
} }
func TestNeedMoreAddresses(t *testing.T) { func TestNeedMoreAddresses(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil) amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil)
defer teardown() defer teardown()
addrsToAdd := 1500 addrsToAdd := 1500
@ -343,7 +302,7 @@ func TestNeedMoreAddresses(t *testing.T) {
var err error var err error
for i := 0; i < addrsToAdd; i++ { for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60) s := AddressKey(fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60))
addrs[i], err = amgr.DeserializeNetAddress(s) addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil { if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err) t.Errorf("Failed to turn %s into an address: %v", s, err)
@ -365,15 +324,6 @@ func TestNeedMoreAddresses(t *testing.T) {
} }
func TestGood(t *testing.T) { func TestGood(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestGood", nil) amgr, teardown := newAddrManagerForTest(t, "TestGood", nil)
defer teardown() defer teardown()
addrsToAdd := 64 * 64 addrsToAdd := 64 * 64
@ -383,7 +333,7 @@ func TestGood(t *testing.T) {
var err error var err error
for i := 0; i < addrsToAdd; i++ { for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60) s := AddressKey(fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60))
addrs[i], err = amgr.DeserializeNetAddress(s) addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil { if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err) t.Errorf("Failed to turn %s into an address: %v", s, err)
@ -422,15 +372,6 @@ func TestGood(t *testing.T) {
} }
func TestGoodChangeSubnetworkID(t *testing.T) { func TestGoodChangeSubnetworkID(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil) amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil)
defer teardown() defer teardown()
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
@ -441,8 +382,8 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
amgr.AddAddress(addr, srcAddr, oldSubnetwork) amgr.AddAddress(addr, srcAddr, oldSubnetwork)
amgr.Good(addr, oldSubnetwork) amgr.Good(addr, oldSubnetwork)
// make sure address was saved to addrIndex under oldSubnetwork // make sure address was saved to addressIndex under oldSubnetwork
ka := amgr.find(addr) ka := amgr.knownAddress(addr)
if ka == nil { if ka == nil {
t.Fatalf("Address was not found after first time .Good called") t.Fatalf("Address was not found after first time .Good called")
} }
@ -451,10 +392,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
} }
// make sure address was added to correct bucket under oldSubnetwork // make sure address was added to correct bucket under oldSubnetwork
bucket := amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] bucket := amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound := false wasFound := false
for e := bucket.Front(); e != nil; e = e.Next() { for _, ka := range bucket {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true wasFound = true
} }
} }
@ -466,8 +407,8 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
newSubnetwork := subnetworkid.SubnetworkIDRegistry newSubnetwork := subnetworkid.SubnetworkIDRegistry
amgr.Good(addr, newSubnetwork) amgr.Good(addr, newSubnetwork)
// make sure address was updated in addrIndex under newSubnetwork // make sure address was updated in addressIndex under newSubnetwork
ka = amgr.find(addr) ka = amgr.knownAddress(addr)
if ka == nil { if ka == nil {
t.Fatalf("Address was not found after second time .Good called") t.Fatalf("Address was not found after second time .Good called")
} }
@ -476,10 +417,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
} }
// make sure address was removed from bucket under oldSubnetwork // make sure address was removed from bucket under oldSubnetwork
bucket = amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] bucket = amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound = false wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() { for _, ka := range bucket {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true wasFound = true
} }
} }
@ -488,10 +429,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
} }
// make sure address was added to correct bucket under newSubnetwork // make sure address was added to correct bucket under newSubnetwork
bucket = amgr.addrTried[*newSubnetwork][amgr.getTriedBucket(addr)] bucket = amgr.subnetworkTriedAddresBucketArrays[*newSubnetwork][amgr.triedAddressBucketIndex(addr)]
wasFound = false wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() { for _, ka := range bucket {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { if NetAddressKey(ka.NetAddress()) == addrKey {
wasFound = true wasFound = true
} }
} }
@ -501,15 +442,6 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
} }
func TestGetAddress(t *testing.T) { func TestGetAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localSubnetworkID := &subnetworkid.SubnetworkID{0xff} localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID) amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID)
defer teardown() defer teardown()
@ -583,15 +515,6 @@ func TestGetAddress(t *testing.T) {
} }
func TestGetBestLocalAddress(t *testing.T) { func TestGetBestLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localAddrs := []wire.NetAddress{ localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")}, {IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")}, {IP: net.ParseIP("::1")},

View File

@ -1,5 +1,5 @@
/* /*
Package addrmgr implements concurrency safe Kaspa address manager. Package addressmanager implements concurrency safe Kaspa address manager.
Address Manager Overview Address Manager Overview
@ -31,4 +31,4 @@ peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only toward known good peers. The general idea is to make a best effort at only
providing usable addresses. providing usable addresses.
*/ */
package addrmgr package addressmanager

View File

@ -2,11 +2,10 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr package addressmanager
import ( import (
"time" "github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
) )
@ -19,7 +18,7 @@ func TstKnownAddressChance(ka *KnownAddress) float64 {
} }
func TstNewKnownAddress(na *wire.NetAddress, attempts int, func TstNewKnownAddress(na *wire.NetAddress, attempts int,
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress { lastattempt, lastsuccess mstime.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt, return &KnownAddress{netAddress: na, attempts: attempts, lastAttempt: lastattempt,
lastsuccess: lastsuccess, tried: tried, refs: refs} lastSuccess: lastsuccess, tried: tried, referenceCount: refs}
} }

View File

@ -2,9 +2,10 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr package addressmanager
import ( import (
"github.com/kaspanet/kaspad/util/mstime"
"time" "time"
"github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/subnetworkid"
@ -15,20 +16,22 @@ import (
// KnownAddress tracks information about a known network address that is used // KnownAddress tracks information about a known network address that is used
// to determine how viable an address is. // to determine how viable an address is.
type KnownAddress struct { type KnownAddress struct {
na *wire.NetAddress netAddress *wire.NetAddress
srcAddr *wire.NetAddress sourceAddress *wire.NetAddress
attempts int attempts int
lastattempt time.Time lastAttempt mstime.Time
lastsuccess time.Time lastSuccess mstime.Time
tried bool tried bool
refs int // reference count of new buckets referenceCount int // reference count of new buckets
subnetworkID *subnetworkid.SubnetworkID subnetworkID *subnetworkid.SubnetworkID
isBanned bool
bannedTime mstime.Time
} }
// NetAddress returns the underlying wire.NetAddress associated with the // NetAddress returns the underlying wire.NetAddress associated with the
// known address. // known address.
func (ka *KnownAddress) NetAddress() *wire.NetAddress { func (ka *KnownAddress) NetAddress() *wire.NetAddress {
return ka.na return ka.netAddress
} }
// SubnetworkID returns the subnetwork ID of the known address. // SubnetworkID returns the subnetwork ID of the known address.
@ -37,16 +40,16 @@ func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID {
} }
// LastAttempt returns the last time the known address was attempted. // LastAttempt returns the last time the known address was attempted.
func (ka *KnownAddress) LastAttempt() time.Time { func (ka *KnownAddress) LastAttempt() mstime.Time {
return ka.lastattempt return ka.lastAttempt
} }
// chance returns the selection probability for a known address. The priority // chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last // depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed. // attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 { func (ka *KnownAddress) chance() float64 {
now := time.Now() now := mstime.Now()
lastAttempt := now.Sub(ka.lastattempt) lastAttempt := now.Sub(ka.lastAttempt)
if lastAttempt < 0 { if lastAttempt < 0 {
lastAttempt = 0 lastAttempt = 0
@ -76,27 +79,27 @@ func (ka *KnownAddress) chance() float64 {
// All addresses that meet these criteria are assumed to be worthless and not // All addresses that meet these criteria are assumed to be worthless and not
// worth keeping hold of. // worth keeping hold of.
func (ka *KnownAddress) isBad() bool { func (ka *KnownAddress) isBad() bool {
if ka.lastattempt.After(time.Now().Add(-1 * time.Minute)) { if ka.lastAttempt.After(mstime.Now().Add(-1 * time.Minute)) {
return false return false
} }
// From the future? // From the future?
if ka.na.Timestamp.After(time.Now().Add(10 * time.Minute)) { if ka.netAddress.Timestamp.After(mstime.Now().Add(10 * time.Minute)) {
return true return true
} }
// Over a month old? // Over a month old?
if ka.na.Timestamp.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { if ka.netAddress.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
return true return true
} }
// Never succeeded? // Never succeeded?
if ka.lastsuccess.IsZero() && ka.attempts >= numRetries { if ka.lastSuccess.IsZero() && ka.attempts >= numRetries {
return true return true
} }
// Hasn't succeeded in too long? // Hasn't succeeded in too long?
if !ka.lastsuccess.After(time.Now().Add(-1*minBadDays*time.Hour*24)) && if !ka.lastSuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) &&
ka.attempts >= maxFailures { ka.attempts >= maxFailures {
return true return true
} }

View File

@ -0,0 +1,115 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addressmanager_test
import (
"github.com/kaspanet/kaspad/util/mstime"
"math"
"testing"
"time"
"github.com/kaspanet/kaspad/addressmanager"
"github.com/kaspanet/kaspad/wire"
)
func TestChance(t *testing.T) {
now := mstime.Now()
var tests = []struct {
addr *addressmanager.KnownAddress
expected float64
}{
{
//Test normal case
addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1.0,
}, {
//Test case in which lastAttempt < 0
addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(30*time.Minute), mstime.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastAttempt < ten minutes
addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, mstime.Now().Add(-5*time.Minute), mstime.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
1 / 1.5 / 1.5,
},
}
err := .0001
for i, test := range tests {
chance := addressmanager.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}
func TestIsBad(t *testing.T) {
now := mstime.Now()
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := mstime.Time{}
futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}
//Test addresses that have been tried in the last minute.
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}
//Test address that claims to be from the future.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}
//Test address that has not been seen in over a month.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}
//It has failed at least three times and never succeeded.
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}
//It has failed ten times in the last week
if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}
//Test an address that should work.
if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr package addressmanager
import ( import (
"github.com/kaspanet/kaspad/logger" "github.com/kaspanet/kaspad/logger"

View File

@ -2,13 +2,11 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr package addressmanager
import ( import (
"net" "net"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
) )
@ -202,8 +200,8 @@ func IsValid(na *wire.NetAddress) bool {
// IsRoutable returns whether or not the passed address is routable over // IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not // the public internet. This is true as long as the address is valid and is not
// in any reserved ranges. // in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool { func (am *AddressManager) IsRoutable(na *wire.NetAddress) bool {
if config.ActiveConfig().NetParams().AcceptUnroutable { if am.cfg.NetParams().AcceptUnroutable {
return !IsLocal(na) return !IsLocal(na)
} }
@ -217,11 +215,11 @@ func IsRoutable(na *wire.NetAddress) bool {
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string // of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, and the string "unroutable" for an unroutable // "local" for a local address, and the string "unroutable" for an unroutable
// address. // address.
func GroupKey(na *wire.NetAddress) string { func (am *AddressManager) GroupKey(na *wire.NetAddress) string {
if IsLocal(na) { if IsLocal(na) {
return "local" return "local"
} }
if !IsRoutable(na) { if !am.IsRoutable(na) {
return "unroutable" return "unroutable"
} }
if IsIPv4(na) { if IsIPv4(na) {

View File

@ -2,30 +2,20 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package addrmgr_test package addressmanager
import ( import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net" "net"
"testing" "testing"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
) )
// TestIPTypes ensures the various functions which determine the type of an IP // TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended. // address based on RFCs work as intended.
func TestIPTypes(t *testing.T) { func TestIPTypes(t *testing.T) {
originalActiveCfg := config.ActiveConfig() amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
config.SetActiveConfig(&config.Config{ defer teardown()
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
type ipTest struct { type ipTest struct {
in wire.NetAddress in wire.NetAddress
rfc1918 bool rfc1918 bool
@ -99,55 +89,55 @@ func TestIPTypes(t *testing.T) {
t.Logf("Running %d tests", len(tests)) t.Logf("Running %d tests", len(tests))
for _, test := range tests { for _, test := range tests {
if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 { if rv := IsRFC1918(&test.in); rv != test.rfc1918 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918) t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
} }
if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 { if rv := IsRFC3849(&test.in); rv != test.rfc3849 {
t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849) t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
} }
if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 { if rv := IsRFC3927(&test.in); rv != test.rfc3927 {
t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927) t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
} }
if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 { if rv := IsRFC3964(&test.in); rv != test.rfc3964 {
t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964) t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
} }
if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 { if rv := IsRFC4193(&test.in); rv != test.rfc4193 {
t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193) t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
} }
if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 { if rv := IsRFC4380(&test.in); rv != test.rfc4380 {
t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380) t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
} }
if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 { if rv := IsRFC4843(&test.in); rv != test.rfc4843 {
t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843) t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
} }
if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 { if rv := IsRFC4862(&test.in); rv != test.rfc4862 {
t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862) t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
} }
if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 { if rv := IsRFC6052(&test.in); rv != test.rfc6052 {
t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052) t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
} }
if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 { if rv := IsRFC6145(&test.in); rv != test.rfc6145 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145) t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
} }
if rv := addrmgr.IsLocal(&test.in); rv != test.local { if rv := IsLocal(&test.in); rv != test.local {
t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local) t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
} }
if rv := addrmgr.IsValid(&test.in); rv != test.valid { if rv := IsValid(&test.in); rv != test.valid {
t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid) t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
} }
if rv := addrmgr.IsRoutable(&test.in); rv != test.routable { if rv := amgr.IsRoutable(&test.in); rv != test.routable {
t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable) t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
} }
} }
@ -156,14 +146,8 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various // TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses. // IP addresses.
func TestGroupKey(t *testing.T) { func TestGroupKey(t *testing.T) {
originalActiveCfg := config.ActiveConfig() amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
config.SetActiveConfig(&config.Config{ defer teardown()
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
tests := []struct { tests := []struct {
name string name string
@ -213,7 +197,7 @@ func TestGroupKey(t *testing.T) {
for i, test := range tests { for i, test := range tests {
nip := net.ParseIP(test.ip) nip := net.ParseIP(test.ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork) na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
if key := addrmgr.GroupKey(&na); key != test.expected { if key := amgr.GroupKey(&na); key != test.expected {
t.Errorf("TestGroupKey #%d (%s): unexpected group key "+ t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
"- got '%s', want '%s'", i, test.name, "- got '%s', want '%s'", i, test.name,
key, test.expected) key, test.expected)

File diff suppressed because it is too large Load Diff

View File

@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"math"
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
// TestChance verifies the selection-probability heuristic for known
// addresses: a fresh, never-failed address gets full weight (1.0), an
// address attempted recently is penalized down to 1%, and each failed
// attempt divides the weight by 1.5.
func TestChance(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	var tests = []struct {
		addr     *addrmgr.KnownAddress
		expected float64
	}{
		{
			// Test normal case
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			// Test case in which lastseen < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			// Test case in which lastattempt < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			// Test case in which lastattempt < ten minutes
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			// Test case with several failed attempts.
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1 / 1.5 / 1.5,
		},
	}

	// Allowed absolute error when comparing floating-point chances.
	err := .0001
	for i, test := range tests {
		chance := addrmgr.TstKnownAddressChance(test.addr)
		if math.Abs(test.expected-chance) >= err {
			t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
		}
	}
}
// TestIsBad exercises the "bad address" classification: addresses tried
// within the last minute are never bad; addresses are bad when their
// timestamp is in the future, they have not been seen in over a month,
// they failed at least three times without ever succeeding, or they
// failed ten times in the last week.
func TestIsBad(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	future := now.Add(35 * time.Minute)
	monthOld := now.Add(-43 * time.Hour * 24)
	secondsOld := now.Add(-2 * time.Second)
	minutesOld := now.Add(-27 * time.Minute)
	hoursOld := now.Add(-5 * time.Hour)
	zeroTime := time.Time{}

	futureNa := &wire.NetAddress{Timestamp: future}
	minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
	monthOldNa := &wire.NetAddress{Timestamp: monthOld}
	currentNa := &wire.NetAddress{Timestamp: secondsOld}

	// Test addresses that have been tried in the last minute.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
		t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
		t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
	}

	// Test address that claims to be from the future.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 6: addresses that claim to be from the future are bad.")
	}

	// Test address that has not been seen in over a month.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 7: addresses more than a month old are bad.")
	}

	// It has failed at least three times and never succeeded.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
		t.Errorf("test case 8: addresses that have never succeeded are bad.")
	}

	// It has failed ten times in the last week
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
		t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
	}

	// Test an address that should work.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 10: This should be a valid address.")
	}
}

248
app/app.go Normal file
View File

@ -0,0 +1,248 @@
package app
import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/addressmanager"
"github.com/kaspanet/kaspad/netadapter/id"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/connmanager"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/dnsseed"
"github.com/kaspanet/kaspad/mempool"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/netadapter"
"github.com/kaspanet/kaspad/protocol"
"github.com/kaspanet/kaspad/rpc"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/wire"
)
// App is a wrapper for all the kaspad services. It wires together the
// p2p protocol, connection management, address management and the RPC
// server, and coordinates their startup and shutdown.
type App struct {
	cfg               *config.Config                 // effective node configuration
	rpcServer         *rpc.Server                    // only started/stopped when !cfg.DisableRPC
	addressManager    *addressmanager.AddressManager // known-peer address book
	protocolManager   *protocol.Manager              // p2p protocol handling
	connectionManager *connmanager.ConnectionManager // outbound/inbound connection lifecycle
	netAdapter        *netadapter.NetAdapter         // low-level network layer; source of this node's ID

	// started and shutdown are manipulated atomically so that Start and
	// Stop each perform their work at most once.
	started, shutdown int32
}
// Start launches all the kaspad services.
func (a *App) Start() {
	// Only the first caller proceeds; subsequent calls are no-ops.
	if atomic.AddInt32(&a.started, 1) != 1 {
		return
	}

	log.Trace("Starting kaspad")

	// The p2p protocol is essential - terminate the process if it fails to start.
	if err := a.protocolManager.Start(); err != nil {
		panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
	}

	a.maybeSeedFromDNS()
	a.connectionManager.Start()

	// The RPC server is optional and may be disabled via configuration.
	if !a.cfg.DisableRPC {
		a.rpcServer.Start()
	}
}
// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() error {
	// Atomically flip the shutdown flag; any caller after the first is a no-op.
	if atomic.AddInt32(&a.shutdown, 1) != 1 {
		log.Infof("Kaspad is already in the process of shutting down")
		return nil
	}

	log.Warnf("Kaspad shutting down")

	a.connectionManager.Stop()

	// Shutdown failures are logged rather than propagated so that the
	// remaining services still get a chance to stop.
	if err := a.protocolManager.Stop(); err != nil {
		log.Errorf("Error stopping the p2p protocol: %+v", err)
	}

	// Shutdown the RPC server if it's not disabled.
	if !a.cfg.DisableRPC {
		if err := a.rpcServer.Stop(); err != nil {
			log.Errorf("Error stopping rpcServer: %+v", err)
		}
	}

	return nil
}
// New returns a new App instance configured to listen on addr for the
// kaspa network type specified by dagParams. Use start to begin accepting
// connections from peers.
//
// The services are constructed in dependency order: indexes before the
// DAG (which feeds them), the DAG before the mempool, and the network
// adapter before the connection/protocol managers and the RPC server.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
	// indexManager and acceptanceIndex are both nil when no optional
	// index is enabled (see setupIndexes).
	indexManager, acceptanceIndex := setupIndexes(cfg)

	sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)

	// Create a new block DAG instance with the appropriate configuration.
	dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
	if err != nil {
		return nil, err
	}

	txMempool := setupMempool(cfg, dag, sigCache)

	netAdapter, err := netadapter.NewNetAdapter(cfg)
	if err != nil {
		return nil, err
	}

	addressManager := addressmanager.New(cfg, databaseContext)

	connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
	if err != nil {
		return nil, err
	}

	protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
	if err != nil {
		return nil, err
	}

	// rpcServer is nil when RPC is disabled (see setupRPC).
	rpcServer, err := setupRPC(
		cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
	if err != nil {
		return nil, err
	}

	return &App{
		cfg:               cfg,
		rpcServer:         rpcServer,
		protocolManager:   protocolManager,
		connectionManager: connectionManager,
		netAdapter:        netAdapter,
		addressManager:    addressManager,
	}, nil
}
// maybeSeedFromDNS queries the DNS seeder for bootstrap peer addresses
// and feeds them into the address manager, unless DNS seeding has been
// disabled by configuration.
func (a *App) maybeSeedFromDNS() {
	if a.cfg.DisableDNSSeed {
		return
	}

	dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, wire.SFNodeNetwork, false, nil,
		a.cfg.Lookup, func(addresses []*wire.NetAddress) {
			// The seeder returns IPs of other nodes rather than its own
			// IP, so the true source of these addresses is unknown. Use
			// the first returned address as the nominal source.
			a.addressManager.AddAddresses(addresses, addresses[0], nil)
		})
}
// setupDAG constructs the block DAG from the node configuration, the
// database context and the supplied signature cache and index manager.
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
	sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {

	return blockdag.New(&blockdag.Config{
		Interrupt:       interrupt,
		DatabaseContext: databaseContext,
		DAGParams:       cfg.NetParams(),
		TimeSource:      blockdag.NewTimeSource(),
		SigCache:        sigCache,
		IndexManager:    indexManager,
		SubnetworkID:    cfg.SubnetworkID,
	})
}
// setupIndexes creates the optional indexes enabled by the configuration.
// It returns an index manager wrapping the enabled indexes along with the
// acceptance index itself; both are nil when no index is enabled.
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
	// Create indexes if needed.
	var indexes []indexers.Indexer
	var acceptanceIndex *indexers.AcceptanceIndex
	if cfg.AcceptanceIndex {
		log.Info("acceptance index is enabled")
		// Instantiate the index before registering it. The previous code
		// appended the nil pointer declared above, which would have put a
		// typed-nil Indexer into the manager.
		acceptanceIndex = indexers.NewAcceptanceIndex()
		indexes = append(indexes, acceptanceIndex)
	}

	// Create an index manager only if at least one optional index is
	// enabled. The previous condition (len(indexes) < 0) could never be
	// true, so an empty manager was created even with no indexes.
	if len(indexes) == 0 {
		return nil, nil
	}
	indexManager := indexers.NewManager(indexes)
	return indexManager, acceptanceIndex
}
// setupMempool builds the transaction memory pool, wiring it to the DAG
// for sequence-lock calculation and deployment-state queries.
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
	// Relay/acceptance policy derived from the node configuration.
	policy := mempool.Policy{
		AcceptNonStd:    cfg.RelayNonStd,
		MaxOrphanTxs:    cfg.MaxOrphanTxs,
		MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
		MinRelayTxFee:   cfg.MinRelayTxFee,
		MaxTxVersion:    1,
	}

	return mempool.New(&mempool.Config{
		Policy: policy,
		CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
			return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
		},
		IsDeploymentActive: dag.IsDeploymentActive,
		SigCache:           sigCache,
		DAG:                dag,
	})
}
// setupRPC creates and wires up the RPC server. When RPC is disabled by
// configuration it returns a nil server and a nil error.
func setupRPC(cfg *config.Config,
	dag *blockdag.BlockDAG,
	txMempool *mempool.TxPool,
	sigCache *txscript.SigCache,
	acceptanceIndex *indexers.AcceptanceIndex,
	connectionManager *connmanager.ConnectionManager,
	addressManager *addressmanager.AddressManager,
	protocolManager *protocol.Manager) (*rpc.Server, error) {

	if cfg.DisableRPC {
		return nil, nil
	}

	policy := mining.Policy{
		BlockMaxMass: cfg.BlockMaxMass,
	}
	blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)

	rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
		connectionManager, addressManager, protocolManager)
	if err != nil {
		return nil, err
	}

	// Signal process shutdown when the RPC server requests it.
	spawn("setupRPC-handleShutdownRequest", func() {
		<-rpcServer.RequestedProcessShutdown()
		signal.ShutdownRequestChannel <- struct{}{}
	})

	return rpcServer, nil
}
// P2PNodeID returns the network ID associated with this App, i.e. the
// identifier this node's network adapter presents on the p2p network.
func (a *App) P2PNodeID() *id.ID {
	return a.netAdapter.ID()
}
// AddressManager returns the AddressManager associated with this App.
func (a *App) AddressManager() *addressmanager.AddressManager {
	return a.addressManager
}
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
//
// NOTE(review): currently a no-op - the libp2p migration left this
// unimplemented (see the TODO below), so callers return immediately.
func (a *App) WaitForShutdown() {
	// TODO(libp2p)
	// a.p2pServer.WaitForShutdown()
}

View File

@ -1,13 +1,14 @@
// Copyright (c) 2017 The btcsuite developers // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package netsync package app
import ( import (
"github.com/kaspanet/kaspad/logger" "github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics" "github.com/kaspanet/kaspad/util/panics"
) )
var log, _ = logger.Get(logger.SubsystemTags.SYNC) var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log) var spawn = panics.GoroutineWrapperFunc(log)

View File

@ -6,6 +6,7 @@ package blockdag
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -17,7 +18,7 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
newNode.status = statusInvalidAncestor newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode) dag.index.AddNode(newNode)
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
return err return err
} }
@ -72,7 +73,7 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// expensive connection logic. It also has some other nice properties // expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or // such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis. // blocks that fail to connect available for further analysis.
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,10 +1,9 @@
package blockdag package blockdag
import ( import (
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/mstime"
"testing"
) )
func TestAncestorErrors(t *testing.T) { func TestAncestorErrors(t *testing.T) {
@ -18,7 +17,7 @@ func TestAncestorErrors(t *testing.T) {
} }
defer teardownFunc() defer teardownFunc()
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0)) node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, mstime.Now())
node.blueScore = 2 node.blueScore = 2
ancestor := node.SelectedAncestor(3) ancestor := node.SelectedAncestor(3)
if ancestor != nil { if ancestor != nil {

View File

@ -6,10 +6,11 @@ package blockdag
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"math" "math"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
@ -110,7 +111,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
parents: parents, parents: parents,
children: make(blockSet), children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.Now().Unix(), timestamp: dag.Now().UnixMilliseconds(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType), bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
} }
@ -120,7 +121,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
node.version = blockHeader.Version node.version = blockHeader.Version
node.bits = blockHeader.Bits node.bits = blockHeader.Bits
node.nonce = blockHeader.Nonce node.nonce = blockHeader.Nonce
node.timestamp = blockHeader.Timestamp.Unix() node.timestamp = blockHeader.Timestamp.UnixMilliseconds()
node.hashMerkleRoot = blockHeader.HashMerkleRoot node.hashMerkleRoot = blockHeader.HashMerkleRoot
node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
node.utxoCommitment = blockHeader.UTXOCommitment node.utxoCommitment = blockHeader.UTXOCommitment
@ -167,7 +168,7 @@ func (node *blockNode) Header() *wire.BlockHeader {
HashMerkleRoot: node.hashMerkleRoot, HashMerkleRoot: node.hashMerkleRoot,
AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot, AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
UTXOCommitment: node.utxoCommitment, UTXOCommitment: node.utxoCommitment,
Timestamp: time.Unix(node.timestamp, 0), Timestamp: node.time(),
Bits: node.bits, Bits: node.bits,
Nonce: node.nonce, Nonce: node.nonce,
} }
@ -204,13 +205,13 @@ func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
// prior to, and including, the block node. // prior to, and including, the block node.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time { func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1) window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
medianTimestamp, err := window.medianTimestamp() medianTimestamp, err := window.medianTimestamp()
if err != nil { if err != nil {
panic(fmt.Sprintf("blueBlockWindow: %s", err)) panic(fmt.Sprintf("blueBlockWindow: %s", err))
} }
return time.Unix(medianTimestamp, 0) return mstime.UnixMilliseconds(medianTimestamp)
} }
func (node *blockNode) ParentHashes() []*daghash.Hash { func (node *blockNode) ParentHashes() []*daghash.Hash {
@ -223,10 +224,14 @@ func (node *blockNode) isGenesis() bool {
} }
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 { func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
return node.blueScore / uint64(dag.dagParams.FinalityInterval) return node.blueScore / uint64(dag.FinalityInterval())
} }
// String returns a string that contains the block hash. // String returns a string that contains the block hash.
func (node blockNode) String() string { func (node blockNode) String() string {
return node.hash.String() return node.hash.String()
} }
func (node *blockNode) time() mstime.Time {
return mstime.UnixMilliseconds(node.timestamp)
}

View File

@ -51,12 +51,12 @@ func TestBlueBlockWindow(t *testing.T) {
expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"}, expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
}, },
{ {
parents: []string{"C", "D"}, parents: []string{"D", "C"},
id: "E", id: "E",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
}, },
{ {
parents: []string{"C", "D"}, parents: []string{"D", "C"},
id: "F", id: "F",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
}, },

View File

@ -4,6 +4,8 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"io"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/coinbasepayload" "github.com/kaspanet/kaspad/util/coinbasepayload"
@ -12,7 +14,6 @@ import (
"github.com/kaspanet/kaspad/util/txsort" "github.com/kaspanet/kaspad/util/txsort"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors" "github.com/pkg/errors"
"io"
) )
// compactFeeData is a specialized data type to store a compact list of fees // compactFeeData is a specialized data type to store a compact list of fees
@ -75,11 +76,11 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
// getBluesFeeData returns the compactFeeData for all nodes's blues, // getBluesFeeData returns the compactFeeData for all nodes's blues,
// used to calculate the fees this blockNode needs to pay // used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) { func (dag *BlockDAG) getBluesFeeData(node *blockNode) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData) bluesFeeData := make(map[daghash.Hash]compactFeeData)
for _, blueBlock := range node.blues { for _, blueBlock := range node.blues {
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blueBlock.hash) feeData, err := dbaccess.FetchFeeData(dag.databaseContext, blueBlock.hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -118,7 +119,7 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
// expectedCoinbaseTransaction returns the coinbase transaction for the current block // expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) { func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
bluesFeeData, err := node.getBluesFeeData(dag) bluesFeeData, err := dag.getBluesFeeData(node)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -177,7 +178,7 @@ func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
} }
} }
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.Params) + totalFees
if totalReward == 0 { if totalReward == 0 {
return nil, nil return nil, nil

View File

@ -12,7 +12,8 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -86,7 +87,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity // TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests. // available when running tests.
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) { func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
dag.dagParams.BlockCoinbaseMaturity = maturity dag.Params.BlockCoinbaseMaturity = maturity
} }
// newTestDAG returns a DAG that is usable for syntetic tests. It is // newTestDAG returns a DAG that is usable for syntetic tests. It is
@ -95,11 +96,9 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
// use of it. // use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG { func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(params) index := newBlockIndex(params)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{ dag := &BlockDAG{
dagParams: params, Params: params,
timeSource: NewTimeSource(), timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize, difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance, TimestampDeviationTolerance: params.TimestampDeviationTolerance,
powMaxBits: util.BigToCompact(params.PowMax), powMaxBits: util.BigToCompact(params.PowMax),
@ -119,7 +118,7 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
// newTestNode creates a block node connected to the passed parent with the // newTestNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields. // provided fields populated and fake values for the other fields.
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp time.Time) *blockNode { func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp mstime.Time) *blockNode {
// Make up a header and create a block node from it. // Make up a header and create a block node from it.
header := &wire.BlockHeader{ header := &wire.BlockHeader{
Version: blockVersion, Version: blockVersion,
@ -186,13 +185,13 @@ func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNod
} }
type fakeTimeSource struct { type fakeTimeSource struct {
time time.Time time mstime.Time
} }
func (fts *fakeTimeSource) Now() time.Time { func (fts *fakeTimeSource) Now() mstime.Time {
return time.Unix(fts.time.Unix(), 0) return fts.time
} }
func newFakeTimeSource(fakeTime time.Time) TimeSource { func newFakeTimeSource(fakeTime mstime.Time) TimeSource {
return &fakeTimeSource{time: fakeTime} return &fakeTimeSource{time: fakeTime}
} }

View File

@ -11,6 +11,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -30,7 +32,9 @@ const (
// queued. // queued.
maxOrphanBlocks = 100 maxOrphanBlocks = 100
isDAGCurrentMaxDiff = 12 * time.Hour // isDAGCurrentMaxDiff is the number of blocks from the network tips (estimated by timestamps) for the current
// to be considered not synced
isDAGCurrentMaxDiff = 40_000
) )
// orphanBlock represents a block that we don't yet have the parent for. It // orphanBlock represents a block that we don't yet have the parent for. It
@ -38,13 +42,13 @@ const (
// forever. // forever.
type orphanBlock struct { type orphanBlock struct {
block *util.Block block *util.Block
expiration time.Time expiration mstime.Time
} }
// delayedBlock represents a block which has a delayed timestamp and will be processed at processTime // delayedBlock represents a block which has a delayed timestamp and will be processed at processTime
type delayedBlock struct { type delayedBlock struct {
block *util.Block block *util.Block
processTime time.Time processTime mstime.Time
} }
// chainUpdates represents the updates made to the selected parent chain after // chainUpdates represents the updates made to the selected parent chain after
@ -61,17 +65,17 @@ type BlockDAG struct {
// The following fields are set when the instance is created and can't // The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a // be changed afterwards, so there is no need to protect them with a
// separate mutex. // separate mutex.
dagParams *dagconfig.Params Params *dagconfig.Params
timeSource TimeSource databaseContext *dbaccess.DatabaseContext
sigCache *txscript.SigCache timeSource TimeSource
indexManager IndexManager sigCache *txscript.SigCache
genesis *blockNode indexManager IndexManager
genesis *blockNode
// The following fields are calculated based upon the provided DAG // The following fields are calculated based upon the provided DAG
// parameters. They are also set when the instance is created and // parameters. They are also set when the instance is created and
// can't be changed afterwards, so there is no need to protect them with // can't be changed afterwards, so there is no need to protect them with
// a separate mutex. // a separate mutex.
targetTimePerBlock int64 // The target delay between blocks (in seconds)
difficultyAdjustmentWindowSize uint64 difficultyAdjustmentWindowSize uint64
TimestampDeviationTolerance uint64 TimestampDeviationTolerance uint64
@ -156,8 +160,106 @@ type BlockDAG struct {
reachabilityTree *reachabilityTree reachabilityTree *reachabilityTree
recentBlockProcessingTimestamps []time.Time recentBlockProcessingTimestamps []mstime.Time
startTime time.Time startTime mstime.Time
}
// New returns a BlockDAG instance using the provided configuration details.
func New(config *Config) (*BlockDAG, error) {
// Enforce required config fields.
if config.DAGParams == nil {
return nil, errors.New("BlockDAG.New DAG parameters nil")
}
if config.TimeSource == nil {
return nil, errors.New("BlockDAG.New timesource is nil")
}
if config.DatabaseContext == nil {
return nil, errors.New("BlockDAG.DatabaseContext timesource is nil")
}
params := config.DAGParams
index := newBlockIndex(params)
dag := &BlockDAG{
Params: params,
databaseContext: config.DatabaseContext,
timeSource: config.TimeSource,
sigCache: config.SigCache,
indexManager: config.IndexManager,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
powMaxBits: util.BigToCompact(params.PowMax),
index: index,
orphans: make(map[daghash.Hash]*orphanBlock),
prevOrphans: make(map[daghash.Hash][]*orphanBlock),
delayedBlocks: make(map[daghash.Hash]*delayedBlock),
delayedBlocksQueue: newDelayedBlocksHeap(),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
blockCount: 0,
subnetworkID: config.SubnetworkID,
startTime: mstime.Now(),
}
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.multisetStore = newMultisetStore(dag)
dag.reachabilityTree = newReachabilityTree(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state
// will be initialized to contain only the genesis block.
err := dag.initDAGState()
if err != nil {
return nil, err
}
// Initialize and catch up all of the currently active optional indexes
// as needed.
if config.IndexManager != nil {
err = config.IndexManager.Init(dag, dag.databaseContext)
if err != nil {
return nil, err
}
}
genesis, ok := index.LookupNode(params.GenesisHash)
if !ok {
genesisBlock := util.NewBlock(dag.Params.GenesisBlock)
// To prevent the creation of a new err variable unintentionally so the
// defered function above could read err - declare isOrphan and isDelayed explicitly.
var isOrphan, isDelayed bool
isOrphan, isDelayed, err = dag.ProcessBlock(genesisBlock, BFNone)
if err != nil {
return nil, err
}
if isDelayed {
return nil, errors.New("genesis block shouldn't be in the future")
}
if isOrphan {
return nil, errors.New("genesis block is unexpectedly orphan")
}
genesis, ok = index.LookupNode(params.GenesisHash)
if !ok {
return nil, errors.New("genesis is not found in the DAG after it was proccessed")
}
}
// Save a reference to the genesis block.
dag.genesis = genesis
// Initialize rule change threshold state caches.
err = dag.initThresholdCaches()
if err != nil {
return nil, err
}
selectedTip := dag.selectedTip()
log.Infof("DAG state (blue score %d, hash %s)",
selectedTip.blueScore, selectedTip.hash)
return dag, nil
} }
// IsKnownBlock returns whether or not the DAG instance has the block represented // IsKnownBlock returns whether or not the DAG instance has the block represented
@ -220,7 +322,7 @@ func (dag *BlockDAG) IsKnownInvalid(hash *daghash.Hash) bool {
// GetOrphanMissingAncestorHashes returns all of the missing parents in the orphan's sub-DAG // GetOrphanMissingAncestorHashes returns all of the missing parents in the orphan's sub-DAG
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([]*daghash.Hash, error) { func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) []*daghash.Hash {
// Protect concurrent access. Using a read lock only so multiple // Protect concurrent access. Using a read lock only so multiple
// readers can query without blocking each other. // readers can query without blocking each other.
dag.orphanLock.RLock() dag.orphanLock.RLock()
@ -245,7 +347,7 @@ func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([
} }
} }
} }
return missingAncestorsHashes, nil return missingAncestorsHashes
} }
// removeOrphanBlock removes the passed orphan block from the orphan pool and // removeOrphanBlock removes the passed orphan block from the orphan pool and
@ -293,7 +395,7 @@ func (dag *BlockDAG) removeOrphanBlock(orphan *orphanBlock) {
func (dag *BlockDAG) addOrphanBlock(block *util.Block) { func (dag *BlockDAG) addOrphanBlock(block *util.Block) {
// Remove expired orphan blocks. // Remove expired orphan blocks.
for _, oBlock := range dag.orphans { for _, oBlock := range dag.orphans {
if time.Now().After(oBlock.expiration) { if mstime.Now().After(oBlock.expiration) {
dag.removeOrphanBlock(oBlock) dag.removeOrphanBlock(oBlock)
continue continue
} }
@ -325,7 +427,7 @@ func (dag *BlockDAG) addOrphanBlock(block *util.Block) {
// Insert the block into the orphan map with an expiration time // Insert the block into the orphan map with an expiration time
// 1 hour from now. // 1 hour from now.
expiration := time.Now().Add(time.Hour) expiration := mstime.Now().Add(time.Hour)
oBlock := &orphanBlock{ oBlock := &orphanBlock{
block: block, block: block,
expiration: expiration, expiration: expiration,
@ -345,7 +447,7 @@ func (dag *BlockDAG) addOrphanBlock(block *util.Block) {
// block either after 'seconds' (according to past median time), or once the // block either after 'seconds' (according to past median time), or once the
// 'BlockBlueScore' has been reached. // 'BlockBlueScore' has been reached.
type SequenceLock struct { type SequenceLock struct {
Seconds int64 Milliseconds int64
BlockBlueScore int64 BlockBlueScore int64
} }
@ -379,7 +481,7 @@ func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util
// A value of -1 for each relative lock type represents a relative time // A value of -1 for each relative lock type represents a relative time
// lock value that will allow a transaction to be included in a block // lock value that will allow a transaction to be included in a block
// at any given height or time. // at any given height or time.
sequenceLock := &SequenceLock{Seconds: -1, BlockBlueScore: -1} sequenceLock := &SequenceLock{Milliseconds: -1, BlockBlueScore: -1}
// Sequence locks don't apply to coinbase transactions Therefore, we // Sequence locks don't apply to coinbase transactions Therefore, we
// return sequence lock values of -1 indicating that this transaction // return sequence lock values of -1 indicating that this transaction
@ -431,16 +533,15 @@ func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util
} }
medianTime := blockNode.PastMedianTime(dag) medianTime := blockNode.PastMedianTime(dag)
// Time based relative time-locks as defined by BIP 68 // Time based relative time-locks have a time granularity of
// have a time granularity of RelativeLockSeconds, so // wire.SequenceLockTimeGranularity, so we shift left by this
// we shift left by this amount to convert to the // amount to convert to the proper relative time-lock. We also
// proper relative time-lock. We also subtract one from // subtract one from the relative lock to maintain the original
// the relative lock to maintain the original lockTime // lockTime semantics.
// semantics. timeLockMilliseconds := (relativeLock << wire.SequenceLockTimeGranularity) - 1
timeLockSeconds := (relativeLock << wire.SequenceLockTimeGranularity) - 1 timeLock := medianTime.UnixMilliseconds() + timeLockMilliseconds
timeLock := medianTime.Unix() + timeLockSeconds if timeLock > sequenceLock.Milliseconds {
if timeLock > sequenceLock.Seconds { sequenceLock.Milliseconds = timeLock
sequenceLock.Seconds = timeLock
} }
default: default:
// The relative lock-time for this input is expressed // The relative lock-time for this input is expressed
@ -459,18 +560,18 @@ func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util
} }
// LockTimeToSequence converts the passed relative locktime to a sequence // LockTimeToSequence converts the passed relative locktime to a sequence
// number in accordance to BIP-68. // number.
func LockTimeToSequence(isSeconds bool, locktime uint64) uint64 { func LockTimeToSequence(isMilliseconds bool, locktime uint64) uint64 {
// If we're expressing the relative lock time in blocks, then the // If we're expressing the relative lock time in blocks, then the
// corresponding sequence number is simply the desired input age. // corresponding sequence number is simply the desired input age.
if !isSeconds { if !isMilliseconds {
return locktime return locktime
} }
// Set the 22nd bit which indicates the lock time is in seconds, then // Set the 22nd bit which indicates the lock time is in milliseconds, then
// shift the locktime over by 9 since the time granularity is in // shift the locktime over by 19 since the time granularity is in
// 512-second intervals (2^9). This results in a max lock-time of // 524288-millisecond intervals (2^19). This results in a max lock-time of
// 33,553,920 seconds, or 1.1 years. // 34,359,214,080 seconds, or 1.1 years.
return wire.SequenceLockTimeIsSeconds | return wire.SequenceLockTimeIsSeconds |
locktime>>wire.SequenceLockTimeGranularity locktime>>wire.SequenceLockTimeGranularity
} }
@ -492,7 +593,7 @@ func (dag *BlockDAG) addBlock(node *blockNode,
if errors.As(err, &RuleError{}) { if errors.As(err, &RuleError{}) {
dag.index.SetStatusFlags(node, statusValidateFailed) dag.index.SetStatusFlags(node, statusValidateFailed)
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -685,7 +786,7 @@ func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, b
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff, func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error { txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error {
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
return err return err
} }
@ -787,7 +888,7 @@ func (dag *BlockDAG) validateGasLimit(block *util.Block) error {
if !msgTx.SubnetworkID.IsEqual(currentSubnetworkID) { if !msgTx.SubnetworkID.IsEqual(currentSubnetworkID) {
currentSubnetworkID = &msgTx.SubnetworkID currentSubnetworkID = &msgTx.SubnetworkID
currentGasUsage = 0 currentGasUsage = 0
currentSubnetworkGasLimit, err = GasLimit(currentSubnetworkID) currentSubnetworkGasLimit, err = dag.GasLimit(currentSubnetworkID)
if err != nil { if err != nil {
return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err) return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
} }
@ -827,6 +928,11 @@ func (dag *BlockDAG) isInSelectedParentChainOf(node *blockNode, other *blockNode
return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other) return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other)
} }
// FinalityInterval is the interval that determines the finality window of the DAG.
func (dag *BlockDAG) FinalityInterval() uint64 {
return uint64(dag.Params.FinalityDuration / dag.Params.TargetTimePerBlock)
}
// checkFinalityViolation checks the new block does not violate the finality rules // checkFinalityViolation checks the new block does not violate the finality rules
// specifically - the new block selectedParent chain should contain the old finality point. // specifically - the new block selectedParent chain should contain the old finality point.
func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error { func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error {
@ -877,7 +983,7 @@ func (dag *BlockDAG) updateFinalityPoint() {
} }
} }
dag.lastFinalityPoint = currentNode dag.lastFinalityPoint = currentNode
spawn(func() { spawn("dag.finalizeNodesBelowFinalityPoint", func() {
dag.finalizeNodesBelowFinalityPoint(true) dag.finalizeNodesBelowFinalityPoint(true)
}) })
} }
@ -889,7 +995,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
} }
var nodesToDelete []*blockNode var nodesToDelete []*blockNode
if deleteDiffData { if deleteDiffData {
nodesToDelete = make([]*blockNode, 0, dag.dagParams.FinalityInterval) nodesToDelete = make([]*blockNode, 0, dag.FinalityInterval())
} }
for len(queue) > 0 { for len(queue) > 0 {
var current *blockNode var current *blockNode
@ -905,7 +1011,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
} }
} }
if deleteDiffData { if deleteDiffData {
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), nodesToDelete) err := dag.utxoDiffStore.removeBlocksDiffData(dag.databaseContext, nodesToDelete)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err)) panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
} }
@ -1136,10 +1242,10 @@ func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
return genesisPastUTXO return genesisPastUTXO
} }
func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) { func (dag *BlockDAG) fetchBlueBlocks(node *blockNode) ([]*util.Block, error) {
blueBlocks := make([]*util.Block, len(node.blues)) blueBlocks := make([]*util.Block, len(node.blues))
for i, blueBlockNode := range node.blues { for i, blueBlockNode := range node.blues {
blueBlock, err := fetchBlockByHash(dbaccess.NoTx(), blueBlockNode.hash) blueBlock, err := dag.fetchBlockByHash(blueBlockNode.hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1253,7 +1359,7 @@ func (dag *BlockDAG) pastUTXO(node *blockNode) (
return nil, nil, nil, err return nil, nil, nil, err
} }
blueBlocks, err := node.fetchBlueBlocks() blueBlocks, err := dag.fetchBlueBlocks(node)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@ -1341,18 +1447,18 @@ func (dag *BlockDAG) isSynced() bool {
var dagTimestamp int64 var dagTimestamp int64
selectedTip := dag.selectedTip() selectedTip := dag.selectedTip()
if selectedTip == nil { if selectedTip == nil {
dagTimestamp = dag.dagParams.GenesisBlock.Header.Timestamp.Unix() dagTimestamp = dag.Params.GenesisBlock.Header.Timestamp.UnixMilliseconds()
} else { } else {
dagTimestamp = selectedTip.timestamp dagTimestamp = selectedTip.timestamp
} }
dagTime := time.Unix(dagTimestamp, 0) dagTime := mstime.UnixMilliseconds(dagTimestamp)
return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff*dag.Params.TargetTimePerBlock
} }
// Now returns the adjusted time according to // Now returns the adjusted time according to
// dag.timeSource. See TimeSource.Now for // dag.timeSource. See TimeSource.Now for
// more details. // more details.
func (dag *BlockDAG) Now() time.Time { func (dag *BlockDAG) Now() mstime.Time {
return dag.timeSource.Now() return dag.timeSource.Now()
} }
@ -1407,7 +1513,7 @@ func (dag *BlockDAG) UTXOSet() *FullUTXOSet {
} }
// CalcPastMedianTime returns the past median time of the DAG. // CalcPastMedianTime returns the past median time of the DAG.
func (dag *BlockDAG) CalcPastMedianTime() time.Time { func (dag *BlockDAG) CalcPastMedianTime() mstime.Time {
return dag.virtual.tips().bluest().PastMedianTime(dag) return dag.virtual.tips().bluest().PastMedianTime(dag)
} }
@ -1583,7 +1689,7 @@ func (dag *BlockDAG) IsInSelectedParentChain(blockHash *daghash.Hash) (bool, err
blockNode, ok := dag.index.LookupNode(blockHash) blockNode, ok := dag.index.LookupNode(blockHash)
if !ok { if !ok {
str := fmt.Sprintf("block %s is not in the DAG", blockHash) str := fmt.Sprintf("block %s is not in the DAG", blockHash)
return false, errNotInDAG(str) return false, ErrNotInDAG(str)
} }
return dag.virtual.selectedParentChainSet.contains(blockNode), nil return dag.virtual.selectedParentChainSet.contains(blockNode), nil
} }
@ -1696,7 +1802,7 @@ func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, err
node, ok := dag.index.LookupNode(hash) node, ok := dag.index.LookupNode(hash)
if !ok { if !ok {
str := fmt.Sprintf("block %s is not in the DAG", hash) str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str) return nil, ErrNotInDAG(str)
} }
@ -1711,7 +1817,7 @@ func (dag *BlockDAG) SelectedParentHash(blockHash *daghash.Hash) (*daghash.Hash,
node, ok := dag.index.LookupNode(blockHash) node, ok := dag.index.LookupNode(blockHash)
if !ok { if !ok {
str := fmt.Sprintf("block %s is not in the DAG", blockHash) str := fmt.Sprintf("block %s is not in the DAG", blockHash)
return nil, errNotInDAG(str) return nil, ErrNotInDAG(str)
} }
@ -1978,7 +2084,7 @@ func (dag *BlockDAG) peekDelayedBlock() *delayedBlock {
type IndexManager interface { type IndexManager interface {
// Init is invoked during DAG initialize in order to allow the index // Init is invoked during DAG initialize in order to allow the index
// manager to initialize itself and any indexes it is managing. // manager to initialize itself and any indexes it is managing.
Init(*BlockDAG) error Init(*BlockDAG, *dbaccess.DatabaseContext) error
// ConnectBlock is invoked when a new block has been connected to the // ConnectBlock is invoked when a new block has been connected to the
// DAG. // DAG.
@ -2025,102 +2131,10 @@ type Config struct {
// //
// This field is required. // This field is required.
SubnetworkID *subnetworkid.SubnetworkID SubnetworkID *subnetworkid.SubnetworkID
}
// New returns a BlockDAG instance using the provided configuration details. // DatabaseContext is the context in which all database queries related to
func New(config *Config) (*BlockDAG, error) { // this DAG are going to run.
// Enforce required config fields. DatabaseContext *dbaccess.DatabaseContext
if config.DAGParams == nil {
return nil, errors.New("BlockDAG.New DAG parameters nil")
}
if config.TimeSource == nil {
return nil, errors.New("BlockDAG.New timesource is nil")
}
params := config.DAGParams
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
index := newBlockIndex(params)
dag := &BlockDAG{
dagParams: params,
timeSource: config.TimeSource,
sigCache: config.SigCache,
indexManager: config.IndexManager,
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
powMaxBits: util.BigToCompact(params.PowMax),
index: index,
orphans: make(map[daghash.Hash]*orphanBlock),
prevOrphans: make(map[daghash.Hash][]*orphanBlock),
delayedBlocks: make(map[daghash.Hash]*delayedBlock),
delayedBlocksQueue: newDelayedBlocksHeap(),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
blockCount: 0,
subnetworkID: config.SubnetworkID,
startTime: time.Now(),
}
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.multisetStore = newMultisetStore(dag)
dag.reachabilityTree = newReachabilityTree(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state
// will be initialized to contain only the genesis block.
err := dag.initDAGState()
if err != nil {
return nil, err
}
// Initialize and catch up all of the currently active optional indexes
// as needed.
if config.IndexManager != nil {
err = config.IndexManager.Init(dag)
if err != nil {
return nil, err
}
}
genesis, ok := index.LookupNode(params.GenesisHash)
if !ok {
genesisBlock := util.NewBlock(dag.dagParams.GenesisBlock)
// To prevent the creation of a new err variable unintentionally so the
// defered function above could read err - declare isOrphan and isDelayed explicitly.
var isOrphan, isDelayed bool
isOrphan, isDelayed, err = dag.ProcessBlock(genesisBlock, BFNone)
if err != nil {
return nil, err
}
if isDelayed {
return nil, errors.New("genesis block shouldn't be in the future")
}
if isOrphan {
return nil, errors.New("genesis block is unexpectedly orphan")
}
genesis, ok = index.LookupNode(params.GenesisHash)
if !ok {
return nil, errors.New("genesis is not found in the DAG after it was proccessed")
}
}
// Save a reference to the genesis block.
dag.genesis = genesis
// Initialize rule change threshold state caches.
err = dag.initThresholdCaches()
if err != nil {
return nil, err
}
selectedTip := dag.selectedTip()
log.Infof("DAG state (blue score %d, hash %s)",
selectedTip.blueScore, selectedTip.hash)
return dag, nil
} }
func (dag *BlockDAG) isKnownDelayedBlock(hash *daghash.Hash) bool { func (dag *BlockDAG) isKnownDelayedBlock(hash *daghash.Hash) bool {

View File

@ -6,9 +6,6 @@ package blockdag
import ( import (
"fmt" "fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@ -16,6 +13,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
@ -207,10 +208,10 @@ func TestIsKnownBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true}, {hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2). // Block 3b should be present (as a second child of Block 2).
{hash: "2eb8903d3eb7f977ab329649f56f4125afa532662f7afe5dba0d4a3f1b93746f", want: true}, {hash: "46314ca17e117b31b467fe1b26fd36c98ee83e750aa5e3b3c1c32870afbe5984", want: true},
// Block 100000 should be present (as an orphan). // Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true}, {hash: "732c891529619d43b5aeb3df42ba25dea483a8c0aded1cf585751ebabea28f29", want: true},
// Random hashes should not be available. // Random hashes should not be available.
{hash: "123", want: false}, {hash: "123", want: false},
@ -278,13 +279,13 @@ func TestCalcSequenceLock(t *testing.T) {
// Obtain the past median time from the PoV of the input created above. // Obtain the past median time from the PoV of the input created above.
// The past median time for the input is the past median time from the PoV // The past median time for the input is the past median time from the PoV
// of the block *prior* to the one that included it. // of the block *prior* to the one that included it.
medianTime := node.RelativeAncestor(5).PastMedianTime(dag).Unix() medianTime := node.RelativeAncestor(5).PastMedianTime(dag).UnixMilliseconds()
// The median time calculated from the PoV of the best block in the // The median time calculated from the PoV of the best block in the
// test DAG. For unconfirmed inputs, this value will be used since // test DAG. For unconfirmed inputs, this value will be used since
// the MTP will be calculated from the PoV of the yet-to-be-mined // the MTP will be calculated from the PoV of the yet-to-be-mined
// block. // block.
nextMedianTime := node.PastMedianTime(dag).Unix() nextMedianTime := node.PastMedianTime(dag).UnixMilliseconds()
nextBlockBlueScore := int32(numBlocksToGenerate) + 1 nextBlockBlueScore := int32(numBlocksToGenerate) + 1
// Add an additional transaction which will serve as our unconfirmed // Add an additional transaction which will serve as our unconfirmed
@ -315,35 +316,34 @@ func TestCalcSequenceLock(t *testing.T) {
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: wire.MaxTxInSequenceNum}}, nil), tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: wire.MaxTxInSequenceNum}}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: -1, Milliseconds: -1,
BlockBlueScore: -1, BlockBlueScore: -1,
}, },
}, },
// A transaction with a single input whose lock time is // A transaction with a single input whose lock time is
// expressed in seconds. However, the specified lock time is // expressed in seconds. However, the specified lock time is
// below the required floor for time based lock times since // below the required floor for time based lock times since
// they have time granularity of 512 seconds. As a result, the // they have time granularity of 524288 milliseconds. As a result, the
// seconds lock-time should be just before the median time of // milliseconds lock-time should be just before the median time of
// the targeted block. // the targeted block.
{ {
name: "single input, seconds lock time below time granularity", name: "single input, milliseconds lock time below time granularity",
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 2)}}, nil), tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 2)}}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: medianTime - 1, Milliseconds: medianTime - 1,
BlockBlueScore: -1, BlockBlueScore: -1,
}, },
}, },
// A transaction with a single input whose lock time is // A transaction with a single input whose lock time is
// expressed in seconds. The number of seconds should be 1023 // expressed in seconds. The number of seconds should be 1048575
// seconds after the median past time of the last block in the // milliseconds after the median past time of the DAG.
// chain.
{ {
name: "single input, 1023 seconds after median time", name: "single input, 1048575 milliseconds after median time",
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 1024)}}, nil), tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(true, 1048576)}}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: medianTime + 1023, Milliseconds: medianTime + 1048575,
BlockBlueScore: -1, BlockBlueScore: -1,
}, },
}, },
@ -358,7 +358,7 @@ func TestCalcSequenceLock(t *testing.T) {
tx: wire.NewNativeMsgTx(1, tx: wire.NewNativeMsgTx(1,
[]*wire.TxIn{{ []*wire.TxIn{{
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(true, 2560), Sequence: LockTimeToSequence(true, 2621440),
}, { }, {
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(false, 4), Sequence: LockTimeToSequence(false, 4),
@ -370,7 +370,7 @@ func TestCalcSequenceLock(t *testing.T) {
nil), nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: medianTime + (5 << wire.SequenceLockTimeGranularity) - 1, Milliseconds: medianTime + (5 << wire.SequenceLockTimeGranularity) - 1,
BlockBlueScore: int64(prevUtxoBlueScore) + 3, BlockBlueScore: int64(prevUtxoBlueScore) + 3,
}, },
}, },
@ -383,7 +383,7 @@ func TestCalcSequenceLock(t *testing.T) {
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(false, 3)}}, nil), tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: utxo, Sequence: LockTimeToSequence(false, 3)}}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: -1, Milliseconds: -1,
BlockBlueScore: int64(prevUtxoBlueScore) + 2, BlockBlueScore: int64(prevUtxoBlueScore) + 2,
}, },
}, },
@ -394,14 +394,14 @@ func TestCalcSequenceLock(t *testing.T) {
name: "two inputs, lock-times in seconds", name: "two inputs, lock-times in seconds",
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{ tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(true, 5120), Sequence: LockTimeToSequence(true, 5242880),
}, { }, {
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(true, 2560), Sequence: LockTimeToSequence(true, 2621440),
}}, nil), }}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: medianTime + (10 << wire.SequenceLockTimeGranularity) - 1, Milliseconds: medianTime + (10 << wire.SequenceLockTimeGranularity) - 1,
BlockBlueScore: -1, BlockBlueScore: -1,
}, },
}, },
@ -422,7 +422,7 @@ func TestCalcSequenceLock(t *testing.T) {
nil), nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: -1, Milliseconds: -1,
BlockBlueScore: int64(prevUtxoBlueScore) + 10, BlockBlueScore: int64(prevUtxoBlueScore) + 10,
}, },
}, },
@ -434,10 +434,10 @@ func TestCalcSequenceLock(t *testing.T) {
tx: wire.NewNativeMsgTx(1, tx: wire.NewNativeMsgTx(1,
[]*wire.TxIn{{ []*wire.TxIn{{
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(true, 2560), Sequence: LockTimeToSequence(true, 2621440),
}, { }, {
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(true, 6656), Sequence: LockTimeToSequence(true, 6815744),
}, { }, {
PreviousOutpoint: utxo, PreviousOutpoint: utxo,
Sequence: LockTimeToSequence(false, 3), Sequence: LockTimeToSequence(false, 3),
@ -448,7 +448,7 @@ func TestCalcSequenceLock(t *testing.T) {
nil), nil),
utxoSet: utxoSet, utxoSet: utxoSet,
want: &SequenceLock{ want: &SequenceLock{
Seconds: medianTime + (13 << wire.SequenceLockTimeGranularity) - 1, Milliseconds: medianTime + (13 << wire.SequenceLockTimeGranularity) - 1,
BlockBlueScore: int64(prevUtxoBlueScore) + 8, BlockBlueScore: int64(prevUtxoBlueScore) + 8,
}, },
}, },
@ -464,7 +464,7 @@ func TestCalcSequenceLock(t *testing.T) {
utxoSet: utxoSet, utxoSet: utxoSet,
mempool: true, mempool: true,
want: &SequenceLock{ want: &SequenceLock{
Seconds: -1, Milliseconds: -1,
BlockBlueScore: int64(nextBlockBlueScore) + 1, BlockBlueScore: int64(nextBlockBlueScore) + 1,
}, },
}, },
@ -472,12 +472,12 @@ func TestCalcSequenceLock(t *testing.T) {
// a time based lock, so the lock time should be based off the // a time based lock, so the lock time should be based off the
// MTP of the *next* block. // MTP of the *next* block.
{ {
name: "single input, unconfirmed, lock-time in seoncds", name: "single input, unconfirmed, lock-time in milliseoncds",
tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(true, 1024)}}, nil), tx: wire.NewNativeMsgTx(1, []*wire.TxIn{{PreviousOutpoint: unConfUtxo, Sequence: LockTimeToSequence(true, 1048576)}}, nil),
utxoSet: utxoSet, utxoSet: utxoSet,
mempool: true, mempool: true,
want: &SequenceLock{ want: &SequenceLock{
Seconds: nextMedianTime + 1023, Milliseconds: nextMedianTime + 1048575,
BlockBlueScore: -1, BlockBlueScore: -1,
}, },
}, },
@ -491,9 +491,9 @@ func TestCalcSequenceLock(t *testing.T) {
t.Fatalf("test '%s', unable to calc sequence lock: %v", test.name, err) t.Fatalf("test '%s', unable to calc sequence lock: %v", test.name, err)
} }
if seqLock.Seconds != test.want.Seconds { if seqLock.Milliseconds != test.want.Milliseconds {
t.Fatalf("test '%s' got %v seconds want %v seconds", t.Fatalf("test '%s' got %v milliseconds want %v milliseconds",
test.name, seqLock.Seconds, test.want.Seconds) test.name, seqLock.Milliseconds, test.want.Milliseconds)
} }
if seqLock.BlockBlueScore != test.want.BlockBlueScore { if seqLock.BlockBlueScore != test.want.BlockBlueScore {
t.Fatalf("test '%s' got blue score of %v want blue score of %v ", t.Fatalf("test '%s' got blue score of %v want blue score of %v ",
@ -519,31 +519,35 @@ func TestCalcPastMedianTime(t *testing.T) {
} }
tests := []struct { tests := []struct {
blockNumber uint32 blockNumber uint32
expectedSecondsSinceGenesis int64 expectedMillisecondsSinceGenesis int64
}{ }{
{ {
blockNumber: 262, blockNumber: 262,
expectedSecondsSinceGenesis: 130, expectedMillisecondsSinceGenesis: 130000,
}, },
{ {
blockNumber: 270, blockNumber: 270,
expectedSecondsSinceGenesis: 138, expectedMillisecondsSinceGenesis: 138000,
}, },
{ {
blockNumber: 240, blockNumber: 240,
expectedSecondsSinceGenesis: 108, expectedMillisecondsSinceGenesis: 108000,
}, },
{ {
blockNumber: 5, blockNumber: 5,
expectedSecondsSinceGenesis: 0, expectedMillisecondsSinceGenesis: 0,
}, },
} }
for _, test := range tests { for _, test := range tests {
secondsSinceGenesis := nodes[test.blockNumber].PastMedianTime(dag).Unix() - dag.genesis.Header().Timestamp.Unix() millisecondsSinceGenesis := nodes[test.blockNumber].PastMedianTime(dag).UnixMilliseconds() -
if secondsSinceGenesis != test.expectedSecondsSinceGenesis { dag.genesis.Header().Timestamp.UnixMilliseconds()
t.Errorf("TestCalcPastMedianTime: expected past median time of block %v to be %v seconds from genesis but got %v", test.blockNumber, test.expectedSecondsSinceGenesis, secondsSinceGenesis)
if millisecondsSinceGenesis != test.expectedMillisecondsSinceGenesis {
t.Errorf("TestCalcPastMedianTime: expected past median time of block %v to be %v milliseconds "+
"from genesis but got %v",
test.blockNumber, test.expectedMillisecondsSinceGenesis, millisecondsSinceGenesis)
} }
} }
} }
@ -553,18 +557,19 @@ func TestNew(t *testing.T) {
dbPath := filepath.Join(tempDir, "TestNew") dbPath := filepath.Join(tempDir, "TestNew")
_ = os.RemoveAll(dbPath) _ = os.RemoveAll(dbPath)
err := dbaccess.Open(dbPath) databaseContext, err := dbaccess.New(dbPath)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
defer func() { defer func() {
dbaccess.Close() databaseContext.Close()
os.RemoveAll(dbPath) os.RemoveAll(dbPath)
}() }()
config := &Config{ config := &Config{
DAGParams: &dagconfig.SimnetParams, DatabaseContext: databaseContext,
TimeSource: NewTimeSource(), DAGParams: &dagconfig.SimnetParams,
SigCache: txscript.NewSigCache(1000), TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
} }
_, err = New(config) _, err = New(config)
if err != nil { if err != nil {
@ -592,20 +597,21 @@ func TestAcceptingInInit(t *testing.T) {
// Create a test database // Create a test database
dbPath := filepath.Join(tempDir, "TestAcceptingInInit") dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
_ = os.RemoveAll(dbPath) _ = os.RemoveAll(dbPath)
err := dbaccess.Open(dbPath) databaseContext, err := dbaccess.New(dbPath)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
defer func() { defer func() {
dbaccess.Close() databaseContext.Close()
os.RemoveAll(dbPath) os.RemoveAll(dbPath)
}() }()
// Create a DAG to add the test block into // Create a DAG to add the test block into
config := &Config{ config := &Config{
DAGParams: &dagconfig.SimnetParams, DatabaseContext: databaseContext,
TimeSource: NewTimeSource(), DAGParams: &dagconfig.SimnetParams,
SigCache: txscript.NewSigCache(1000), TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
} }
dag, err := New(config) dag, err := New(config)
if err != nil { if err != nil {
@ -629,7 +635,7 @@ func TestAcceptingInInit(t *testing.T) {
testNode.status = statusDataStored testNode.status = statusDataStored
// Manually add the test block to the database // Manually add the test block to the database
dbTx, err := dbaccess.NewTx() dbTx, err := databaseContext.NewTx()
if err != nil { if err != nil {
t.Fatalf("Failed to open database "+ t.Fatalf("Failed to open database "+
"transaction: %s", err) "transaction: %s", err)
@ -696,7 +702,7 @@ func TestConfirmations(t *testing.T) {
// Add a chain of blocks // Add a chain of blocks
chainBlocks := make([]*wire.MsgBlock, 5) chainBlocks := make([]*wire.MsgBlock, 5)
chainBlocks[0] = dag.dagParams.GenesisBlock chainBlocks[0] = dag.Params.GenesisBlock
for i := uint32(1); i < 5; i++ { for i := uint32(1); i < 5; i++ {
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1]) chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
} }
@ -805,7 +811,7 @@ func TestAcceptingBlock(t *testing.T) {
numChainBlocks := uint32(10) numChainBlocks := uint32(10)
chainBlocks := make([]*wire.MsgBlock, numChainBlocks) chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
chainBlocks[0] = dag.dagParams.GenesisBlock chainBlocks[0] = dag.Params.GenesisBlock
for i := uint32(1); i <= numChainBlocks-1; i++ { for i := uint32(1); i <= numChainBlocks-1; i++ {
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1]) chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
} }
@ -921,7 +927,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
blockTime := dag.genesis.Header().Timestamp blockTime := dag.genesis.Header().Timestamp
flushUTXODiffStore := func() { flushUTXODiffStore := func() {
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
t.Fatalf("Failed to open database transaction: %s", err) t.Fatalf("Failed to open database transaction: %s", err)
} }
@ -951,7 +957,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
flushUTXODiffStore() flushUTXODiffStore()
return node return node
} }
finalityInterval := dag.dagParams.FinalityInterval finalityInterval := dag.FinalityInterval()
nodes := make([]*blockNode, 0, finalityInterval) nodes := make([]*blockNode, 0, finalityInterval)
currentNode := dag.genesis currentNode := dag.genesis
nodes = append(nodes, currentNode) nodes = append(nodes, currentNode)
@ -1127,7 +1133,7 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) {
&dagconfig.SimnetParams, &dagconfig.SimnetParams,
} }
for _, params := range netParams { for _, params := range netParams {
if params.TargetTimePerBlock*time.Duration(params.FinalityInterval) < isDAGCurrentMaxDiff { if params.FinalityDuration < isDAGCurrentMaxDiff*params.TargetTimePerBlock {
t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name) t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name)
} }
} }

View File

@ -28,19 +28,19 @@ var (
byteOrder = binary.LittleEndian byteOrder = binary.LittleEndian
) )
// errNotInDAG signifies that a block hash or height that is not in the // ErrNotInDAG signifies that a block hash that is not in the
// DAG was requested. // DAG was requested.
type errNotInDAG string type ErrNotInDAG string
// Error implements the error interface. // Error implements the error interface.
func (e errNotInDAG) Error() string { func (e ErrNotInDAG) Error() string {
return string(e) return string(e)
} }
// isNotInDAGErr returns whether or not the passed error is an // IsNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error. // ErrNotInDAG error.
func isNotInDAGErr(err error) bool { func IsNotInDAGErr(err error) bool {
var notInDAGErr errNotInDAG var notInDAGErr ErrNotInDAG
return errors.As(err, &notInDAGErr) return errors.As(err, &notInDAGErr)
} }
@ -164,9 +164,9 @@ func saveDAGState(dbContext dbaccess.Context, state *dagState) error {
// createDAGState initializes the DAG state to the // createDAGState initializes the DAG state to the
// genesis block and the node's local subnetwork id. // genesis block and the node's local subnetwork id.
func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error { func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error {
return saveDAGState(dbaccess.NoTx(), &dagState{ return saveDAGState(dag.databaseContext, &dagState{
TipHashes: []*daghash.Hash{dag.dagParams.GenesisHash}, TipHashes: []*daghash.Hash{dag.Params.GenesisHash},
LastFinalityPoint: dag.dagParams.GenesisHash, LastFinalityPoint: dag.Params.GenesisHash,
LocalSubnetworkID: localSubnetworkID, LocalSubnetworkID: localSubnetworkID,
}) })
} }
@ -177,7 +177,7 @@ func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID
func (dag *BlockDAG) initDAGState() error { func (dag *BlockDAG) initDAGState() error {
// Fetch the stored DAG state from the database. If it doesn't exist, // Fetch the stored DAG state from the database. If it doesn't exist,
// it means that kaspad is running for the first time. // it means that kaspad is running for the first time.
serializedDAGState, err := dbaccess.FetchDAGState(dbaccess.NoTx()) serializedDAGState, err := dbaccess.FetchDAGState(dag.databaseContext)
if dbaccess.IsNotFoundError(err) { if dbaccess.IsNotFoundError(err) {
// Initialize the database and the DAG state to the genesis block. // Initialize the database and the DAG state to the genesis block.
return dag.createDAGState(dag.subnetworkID) return dag.createDAGState(dag.subnetworkID)
@ -209,13 +209,13 @@ func (dag *BlockDAG) initDAGState() error {
} }
log.Debugf("Loading reachability data...") log.Debugf("Loading reachability data...")
err = dag.reachabilityTree.init(dbaccess.NoTx()) err = dag.reachabilityTree.init(dag.databaseContext)
if err != nil { if err != nil {
return err return err
} }
log.Debugf("Loading multiset data...") log.Debugf("Loading multiset data...")
err = dag.multisetStore.init(dbaccess.NoTx()) err = dag.multisetStore.init(dag.databaseContext)
if err != nil { if err != nil {
return err return err
} }
@ -263,7 +263,7 @@ func (dag *BlockDAG) validateLocalSubnetworkID(state *dagState) error {
} }
func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err error) { func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err error) {
blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx()) blockIndexCursor, err := dbaccess.BlockIndexCursor(dag.databaseContext)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -293,7 +293,7 @@ func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err e
} }
if dag.blockCount == 0 { if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) { if !node.hash.IsEqual(dag.Params.GenesisHash) {
return nil, errors.Errorf("Expected "+ return nil, errors.Errorf("Expected "+
"first entry in block index to be genesis block, "+ "first entry in block index to be genesis block, "+
"found %s", node.hash) "found %s", node.hash)
@ -317,7 +317,7 @@ func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err e
func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error) { func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error) {
fullUTXOCollection = make(utxoCollection) fullUTXOCollection = make(utxoCollection)
cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx()) cursor, err := dbaccess.UTXOSetCursor(dag.databaseContext)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -368,7 +368,7 @@ func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*block
for _, node := range unprocessedBlockNodes { for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it // Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted. // doesn't, the database has certainly been corrupted.
blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash) blockExists, err := dbaccess.HasBlock(dag.databaseContext, node.hash)
if err != nil { if err != nil {
return errors.Wrapf(err, "HasBlock "+ return errors.Wrapf(err, "HasBlock "+
"for block %s failed: %s", node.hash, err) "for block %s failed: %s", node.hash, err)
@ -379,7 +379,7 @@ func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*block
} }
// Attempt to accept the block. // Attempt to accept the block.
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash) block, err := dag.fetchBlockByHash(node.hash)
if err != nil { if err != nil {
return err return err
} }
@ -421,7 +421,7 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
version: header.Version, version: header.Version,
bits: header.Bits, bits: header.Bits,
nonce: header.Nonce, nonce: header.Nonce,
timestamp: header.Timestamp.Unix(), timestamp: header.Timestamp.UnixMilliseconds(),
hashMerkleRoot: header.HashMerkleRoot, hashMerkleRoot: header.HashMerkleRoot,
acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot, acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
utxoCommitment: header.UTXOCommitment, utxoCommitment: header.UTXOCommitment,
@ -510,8 +510,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
// fetchBlockByHash retrieves the raw block for the provided hash, // fetchBlockByHash retrieves the raw block for the provided hash,
// deserializes it, and returns a util.Block of it. // deserializes it, and returns a util.Block of it.
func fetchBlockByHash(dbContext dbaccess.Context, hash *daghash.Hash) (*util.Block, error) { func (dag *BlockDAG) fetchBlockByHash(hash *daghash.Hash) (*util.Block, error) {
blockBytes, err := dbaccess.FetchBlock(dbContext, hash) blockBytes, err := dbaccess.FetchBlock(dag.databaseContext, hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -607,10 +607,10 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
node, ok := dag.index.LookupNode(hash) node, ok := dag.index.LookupNode(hash)
if !ok { if !ok {
str := fmt.Sprintf("block %s is not in the DAG", hash) str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str) return nil, ErrNotInDAG(str)
} }
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash) block, err := dag.fetchBlockByHash(node.hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -639,7 +639,7 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha
} }
key := blockIndexKey(lowHash, blueScore) key := blockIndexKey(lowHash, blueScore)
cursor, err := dbaccess.BlockIndexCursorFrom(dbaccess.NoTx(), key) cursor, err := dbaccess.BlockIndexCursorFrom(dag.databaseContext, key)
if dbaccess.IsNotFoundError(err) { if dbaccess.IsNotFoundError(err) {
return nil, errors.Wrapf(err, "block %s not in block index", lowHash) return nil, errors.Wrapf(err, "block %s not in block index", lowHash)
} }

View File

@ -14,25 +14,25 @@ import (
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
) )
// TestErrNotInDAG ensures the functions related to errNotInDAG work // TestErrNotInDAG ensures the functions related to ErrNotInDAG work
// as expected. // as expected.
func TestErrNotInDAG(t *testing.T) { func TestErrNotInDAG(t *testing.T) {
errStr := "no block at height 1 exists" errStr := "no block at height 1 exists"
err := error(errNotInDAG(errStr)) err := error(ErrNotInDAG(errStr))
// Ensure the stringized output for the error is as expected. // Ensure the stringized output for the error is as expected.
if err.Error() != errStr { if err.Error() != errStr {
t.Fatalf("errNotInDAG retuned unexpected error string - "+ t.Fatalf("ErrNotInDAG retuned unexpected error string - "+
"got %q, want %q", err.Error(), errStr) "got %q, want %q", err.Error(), errStr)
} }
// Ensure error is detected as the correct type. // Ensure error is detected as the correct type.
if !isNotInDAGErr(err) { if !IsNotInDAGErr(err) {
t.Fatalf("isNotInDAGErr did not detect as expected type") t.Fatalf("IsNotInDAGErr did not detect as expected type")
} }
err = errors.New("something else") err = errors.New("something else")
if isNotInDAGErr(err) { if IsNotInDAGErr(err) {
t.Fatalf("isNotInDAGErr detected incorrect type") t.Fatalf("IsNotInDAGErr detected incorrect type")
} }
} }

View File

@ -6,14 +6,14 @@ package blockdag
import ( import (
"github.com/kaspanet/kaspad/util/bigintpool" "github.com/kaspanet/kaspad/util/bigintpool"
"time" "github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
) )
// requiredDifficulty calculates the required difficulty for a // requiredDifficulty calculates the required difficulty for a
// block given its bluest parent. // block given its bluest parent.
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 { func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime mstime.Time) uint32 {
// Genesis block. // Genesis block.
if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 { if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
return dag.powMaxBits return dag.powMaxBits
@ -34,7 +34,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
defer bigintpool.Release(newTarget) defer bigintpool.Release(newTarget)
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp) windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
defer bigintpool.Release(windowTimeStampDifference) defer bigintpool.Release(windowTimeStampDifference)
targetTimePerBlock := bigintpool.Acquire(dag.targetTimePerBlock) targetTimePerBlock := bigintpool.Acquire(dag.Params.TargetTimePerBlock.Milliseconds())
defer bigintpool.Release(targetTimePerBlock) defer bigintpool.Release(targetTimePerBlock)
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize)) difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
defer bigintpool.Release(difficultyAdjustmentWindowSize) defer bigintpool.Release(difficultyAdjustmentWindowSize)
@ -44,7 +44,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
Mul(newTarget, windowTimeStampDifference). Mul(newTarget, windowTimeStampDifference).
Div(newTarget, targetTimePerBlock). Div(newTarget, targetTimePerBlock).
Div(newTarget, difficultyAdjustmentWindowSize) Div(newTarget, difficultyAdjustmentWindowSize)
if newTarget.Cmp(dag.dagParams.PowMax) > 0 { if newTarget.Cmp(dag.Params.PowMax) > 0 {
return dag.powMaxBits return dag.powMaxBits
} }
newTargetBits := util.BigToCompact(newTarget) newTargetBits := util.BigToCompact(newTarget)
@ -55,7 +55,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
// be built on top of the current tips. // be built on top of the current tips.
// //
// This function is safe for concurrent access. // This function is safe for concurrent access.
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 { func (dag *BlockDAG) NextRequiredDifficulty(timestamp mstime.Time) uint32 {
difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp) difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
return difficulty return difficulty
} }

View File

@ -6,6 +6,7 @@ package blockdag
import ( import (
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/mstime"
"math/big" "math/big"
"testing" "testing"
"time" "time"
@ -90,11 +91,11 @@ func TestDifficulty(t *testing.T) {
} }
defer teardownFunc() defer teardownFunc()
zeroTime := time.Unix(0, 0) zeroTime := mstime.Time{}
addNode := func(parents blockSet, blockTime time.Time) *blockNode { addNode := func(parents blockSet, blockTime mstime.Time) *blockNode {
bluestParent := parents.bluest() bluestParent := parents.bluest()
if blockTime == zeroTime { if blockTime.IsZero() {
blockTime = time.Unix(bluestParent.timestamp, 0) blockTime = bluestParent.time()
blockTime = blockTime.Add(params.TargetTimePerBlock) blockTime = blockTime.Add(params.TargetTimePerBlock)
} }
block, err := PrepareBlockForTest(dag, parents.hashes(), nil) block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
@ -174,7 +175,7 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0 sameBitsCount = 0
} }
} }
slowBlockTime := time.Unix(tip.timestamp, 0) slowBlockTime := tip.time()
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second) slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime) slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits { if slowNode.bits != tip.bits {

View File

@ -28,11 +28,6 @@ const (
// to a newer version. // to a newer version.
ErrBlockVersionTooOld ErrBlockVersionTooOld
// ErrInvalidTime indicates the time in the passed block has a precision
// that is more than one second. The DAG consensus rules require
// timestamps to have a maximum precision of one second.
ErrInvalidTime
// ErrTimeTooOld indicates the time is either before the median time of // ErrTimeTooOld indicates the time is either before the median time of
// the last several blocks per the DAG consensus rules. // the last several blocks per the DAG consensus rules.
ErrTimeTooOld ErrTimeTooOld
@ -211,6 +206,10 @@ const (
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was // ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
// submitted with BFDisallowDelay flag raised. // submitted with BFDisallowDelay flag raised.
ErrDelayedBlockIsNotAllowed ErrDelayedBlockIsNotAllowed
// ErrOrphanBlockIsNotAllowed indicates that an orphan block was submitted with
// BFDisallowOrphans flag raised.
ErrOrphanBlockIsNotAllowed
) )
// Map of ErrorCode values back to their constant names for pretty printing. // Map of ErrorCode values back to their constant names for pretty printing.
@ -218,7 +217,6 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock", ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockMassTooHigh: "ErrBlockMassTooHigh", ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld", ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld", ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew", ErrTimeTooNew: "ErrTimeTooNew",
ErrNoParents: "ErrNoParents", ErrNoParents: "ErrNoParents",
@ -260,6 +258,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidPayloadHash: "ErrInvalidPayloadHash", ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation", ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed", ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
ErrOrphanBlockIsNotAllowed: "ErrOrphanBlockIsNotAllowed",
} }
// String returns the ErrorCode as a human-readable name. // String returns the ErrorCode as a human-readable name.

View File

@ -17,7 +17,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrDuplicateBlock, "ErrDuplicateBlock"}, {ErrDuplicateBlock, "ErrDuplicateBlock"},
{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"}, {ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"}, {ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{ErrInvalidTime, "ErrInvalidTime"},
{ErrTimeTooOld, "ErrTimeTooOld"}, {ErrTimeTooOld, "ErrTimeTooOld"},
{ErrTimeTooNew, "ErrTimeTooNew"}, {ErrTimeTooNew, "ErrTimeTooNew"},
{ErrNoParents, "ErrNoParents"}, {ErrNoParents, "ErrNoParents"},
@ -58,6 +57,7 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"}, {ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"}, {ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"}, {ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{ErrOrphanBlockIsNotAllowed, "ErrOrphanBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"}, {0xffff, "Unknown ErrorCode (65535)"},
} }

View File

@ -2,11 +2,12 @@ package blockdag_test
import ( import (
"fmt" "fmt"
"github.com/pkg/errors"
"math" "math"
"strings" "strings"
"testing" "testing"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
@ -40,7 +41,7 @@ import (
func TestFinality(t *testing.T) { func TestFinality(t *testing.T) {
params := dagconfig.SimnetParams params := dagconfig.SimnetParams
params.K = 1 params.K = 1
params.FinalityInterval = 100 params.FinalityDuration = 100 * params.TargetTimePerBlock
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{ dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
DAGParams: &params, DAGParams: &params,
}) })
@ -49,7 +50,7 @@ func TestFinality(t *testing.T) {
} }
defer teardownFunc() defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) { buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false) msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -74,7 +75,7 @@ func TestFinality(t *testing.T) {
currentNode := genesis currentNode := genesis
// First we build a chain of params.FinalityInterval blocks for future use // First we build a chain of params.FinalityInterval blocks for future use
for i := uint64(0); i < params.FinalityInterval; i++ { for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil { if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@ -86,7 +87,7 @@ func TestFinality(t *testing.T) {
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and // Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point // we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis currentNode = genesis
for i := uint64(0); i < params.FinalityInterval; i++ { for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil { if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@ -95,7 +96,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode expectedFinalityPoint := currentNode
for i := uint64(0); i < params.FinalityInterval; i++ { for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil { if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@ -175,9 +176,19 @@ func TestFinalityInterval(t *testing.T) {
&dagconfig.SimnetParams, &dagconfig.SimnetParams,
} }
for _, params := range netParams { for _, params := range netParams {
if params.FinalityInterval > wire.MaxInvPerMsg { func() {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name) dag, teardownFunc, err := blockdag.DAGSetup("TestFinalityInterval", true, blockdag.Config{
} DAGParams: params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance for %s: %v", params.Name, err)
}
defer teardownFunc()
if dag.FinalityInterval() > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}()
} }
} }
@ -200,7 +211,7 @@ func TestSubnetworkRegistry(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("could not register network: %s", err) t.Fatalf("could not register network: %s", err)
} }
limit, err := blockdag.GasLimit(subnetworkID) limit, err := dag.GasLimit(subnetworkID)
if err != nil { if err != nil {
t.Fatalf("could not retrieve gas limit: %s", err) t.Fatalf("could not retrieve gas limit: %s", err)
} }
@ -221,7 +232,7 @@ func TestChainedTransactions(t *testing.T) {
} }
defer teardownFunc() defer teardownFunc()
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false) block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -269,7 +280,7 @@ func TestChainedTransactions(t *testing.T) {
} }
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut}) chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false) block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -315,7 +326,7 @@ func TestChainedTransactions(t *testing.T) {
} }
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut}) nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false) block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -372,7 +383,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
} }
// Create the block // Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false) msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil { if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err) t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
} }
@ -429,7 +440,7 @@ func TestGasLimit(t *testing.T) {
cbTxs := []*wire.MsgTx{} cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false) fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -481,7 +492,7 @@ func TestGasLimit(t *testing.T) {
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{}) tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
// Here we check that we can't process a block that has transactions that exceed the gas limit // Here we check that we can't process a block that has transactions that exceed the gas limit
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true) overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -516,7 +527,7 @@ func TestGasLimit(t *testing.T) {
subnetworkID, math.MaxUint64, []byte{}) subnetworkID, math.MaxUint64, []byte{})
// Here we check that we can't process a block that its transactions' gas overflows uint64 // Here we check that we can't process a block that its transactions' gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true) overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -550,7 +561,7 @@ func TestGasLimit(t *testing.T) {
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn}, nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{}) []*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true) nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }
@ -571,7 +582,7 @@ func TestGasLimit(t *testing.T) {
} }
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit // Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true) validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil { if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err) t.Fatalf("PrepareBlockForTest: %v", err)
} }

View File

@ -78,13 +78,13 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
} }
candidateAnticoneSize++ candidateAnticoneSize++
if candidateAnticoneSize > dag.dagParams.K { if candidateAnticoneSize > dag.Params.K {
// k-cluster violation: The candidate's blue anticone exceeded k // k-cluster violation: The candidate's blue anticone exceeded k
possiblyBlue = false possiblyBlue = false
break break
} }
if candidateBluesAnticoneSizes[block] == dag.dagParams.K { if candidateBluesAnticoneSizes[block] == dag.Params.K {
// k-cluster violation: A block in candidate's blue anticone already // k-cluster violation: A block in candidate's blue anticone already
// has k blue blocks in its own anticone // has k blue blocks in its own anticone
possiblyBlue = false possiblyBlue = false
@ -93,7 +93,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// This is a sanity check that validates that a blue // This is a sanity check that validates that a blue
// block's blue anticone is not already larger than K. // block's blue anticone is not already larger than K.
if candidateBluesAnticoneSizes[block] > dag.dagParams.K { if candidateBluesAnticoneSizes[block] > dag.Params.K {
return nil, errors.New("found blue anticone size larger than k") return nil, errors.New("found blue anticone size larger than k")
} }
} }
@ -109,7 +109,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// The maximum length of node.blues can be K+1 because // The maximum length of node.blues can be K+1 because
// it contains the selected parent. // it contains the selected parent.
if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 { if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
break break
} }
} }

View File

@ -2,14 +2,15 @@ package blockdag
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect" "reflect"
"sort" "sort"
"strings" "strings"
"testing" "testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
) )
type testBlockData struct { type testBlockData struct {
@ -33,7 +34,7 @@ func TestGHOSTDAG(t *testing.T) {
}{ }{
{ {
k: 3, k: 3,
expectedReds: []string{"F", "G", "H", "I", "N", "Q"}, expectedReds: []string{"F", "G", "H", "I", "N", "P"},
dagData: []*testBlockData{ dagData: []*testBlockData{
{ {
parents: []string{"A"}, parents: []string{"A"},
@ -166,7 +167,7 @@ func TestGHOSTDAG(t *testing.T) {
id: "T", id: "T",
expectedScore: 13, expectedScore: 13,
expectedSelectedParent: "S", expectedSelectedParent: "S",
expectedBlues: []string{"S", "O", "P"}, expectedBlues: []string{"S", "O", "Q"},
}, },
}, },
}, },
@ -294,15 +295,15 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
defer teardownFunc() defer teardownFunc()
// Prepare a block chain with size K beginning with the genesis block // Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock currentBlockA := dag.Params.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ { for i := dagconfig.KType(0); i < dag.Params.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA) newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
currentBlockA = newBlock currentBlockA = newBlock
} }
// Prepare another block chain with size K beginning with the genesis block // Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock currentBlockB := dag.Params.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ { for i := dagconfig.KType(0); i < dag.Params.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB) newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
currentBlockB = newBlock currentBlockB = newBlock
} }
@ -342,8 +343,8 @@ func TestGHOSTDAGErrors(t *testing.T) {
defer teardownFunc() defer teardownFunc()
// Add two child blocks to the genesis // Add two child blocks to the genesis
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
// Add a child block to the previous two blocks // Add a child block to the previous two blocks
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2) block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)
@ -351,7 +352,7 @@ func TestGHOSTDAGErrors(t *testing.T) {
// Clear the reachability store // Clear the reachability store
dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{} dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{}
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
t.Fatalf("NewTx: %s", err) t.Fatalf("NewTx: %s", err)
} }

View File

@ -3,6 +3,7 @@ package indexers
import ( import (
"bytes" "bytes"
"encoding/gob" "encoding/gob"
"github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
@ -14,7 +15,8 @@ import (
// it stores a mapping between a block's hash and the set of transactions that the // it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks. // block accepts among its blue blocks.
type AcceptanceIndex struct { type AcceptanceIndex struct {
dag *blockdag.BlockDAG dag *blockdag.BlockDAG
databaseContext *dbaccess.DatabaseContext
} }
// Ensure the AcceptanceIndex type implements the Indexer interface. // Ensure the AcceptanceIndex type implements the Indexer interface.
@ -31,8 +33,8 @@ func NewAcceptanceIndex() *AcceptanceIndex {
} }
// DropAcceptanceIndex drops the acceptance index. // DropAcceptanceIndex drops the acceptance index.
func DropAcceptanceIndex() error { func DropAcceptanceIndex(databaseContext *dbaccess.DatabaseContext) error {
dbTx, err := dbaccess.NewTx() dbTx, err := databaseContext.NewTx()
if err != nil { if err != nil {
return err return err
} }
@ -49,8 +51,9 @@ func DropAcceptanceIndex() error {
// Init initializes the hash-based acceptance index. // Init initializes the hash-based acceptance index.
// //
// This is part of the Indexer interface. // This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error { func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
idx.dag = dag idx.dag = dag
idx.databaseContext = databaseContext
return idx.recover() return idx.recover()
} }
@ -60,7 +63,7 @@ func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
// This is part of the Indexer interface. // This is part of the Indexer interface.
func (idx *AcceptanceIndex) recover() error { func (idx *AcceptanceIndex) recover() error {
return idx.dag.ForEachHash(func(hash daghash.Hash) error { return idx.dag.ForEachHash(func(hash daghash.Hash) error {
dbTx, err := dbaccess.NewTx() dbTx, err := idx.databaseContext.NewTx()
if err != nil { if err != nil {
return err return err
} }
@ -102,7 +105,7 @@ func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHas
// TxsAcceptanceData returns the acceptance data of all the transactions that // TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash. // were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) { func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(dbaccess.NoTx(), blockHash) serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(idx.databaseContext, blockHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,13 +1,6 @@
package indexers package indexers
import ( import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -15,6 +8,14 @@ import (
"reflect" "reflect"
"syscall" "syscall"
"testing" "testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
) )
func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) { func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
@ -96,14 +97,15 @@ func TestAcceptanceIndexRecover(t *testing.T) {
} }
defer os.RemoveAll(db1Path) defer os.RemoveAll(db1Path)
err = dbaccess.Open(db1Path) databaseContext1, err := dbaccess.New(db1Path)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
db1Config := blockdag.Config{ db1Config := blockdag.Config{
IndexManager: db1IndexManager, IndexManager: db1IndexManager,
DAGParams: params, DAGParams: params,
DatabaseContext: databaseContext1,
} }
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config) db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
@ -160,17 +162,18 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("Error fetching acceptance data: %s", err) t.Fatalf("Error fetching acceptance data: %s", err)
} }
err = dbaccess.Close() err = databaseContext1.Close()
if err != nil { if err != nil {
t.Fatalf("Error closing the database: %s", err) t.Fatalf("Error closing the database: %s", err)
} }
err = dbaccess.Open(db2Path) databaseContext2, err := dbaccess.New(db2Path)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
db2Config := blockdag.Config{ db2Config := blockdag.Config{
DAGParams: params, DAGParams: params,
DatabaseContext: databaseContext2,
} }
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config) db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
@ -206,11 +209,11 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("copyDirectory: %s", err) t.Fatalf("copyDirectory: %s", err)
} }
err = dbaccess.Close() err = databaseContext2.Close()
if err != nil { if err != nil {
t.Fatalf("Error closing the database: %s", err) t.Fatalf("Error closing the database: %s", err)
} }
err = dbaccess.Open(db3Path) databaseContext3, err := dbaccess.New(db3Path)
if err != nil { if err != nil {
t.Fatalf("error creating db: %s", err) t.Fatalf("error creating db: %s", err)
} }
@ -218,8 +221,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db3AcceptanceIndex := NewAcceptanceIndex() db3AcceptanceIndex := NewAcceptanceIndex()
db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex}) db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
db3Config := blockdag.Config{ db3Config := blockdag.Config{
IndexManager: db3IndexManager, IndexManager: db3IndexManager,
DAGParams: params, DAGParams: params,
DatabaseContext: databaseContext3,
} }
_, teardown, err = blockdag.DAGSetup("", false, db3Config) _, teardown, err = blockdag.DAGSetup("", false, db3Config)

View File

@ -18,7 +18,7 @@ import (
type Indexer interface { type Indexer interface {
// Init is invoked when the index manager is first initializing the // Init is invoked when the index manager is first initializing the
// index. // index.
Init(dag *blockdag.BlockDAG) error Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error
// ConnectBlock is invoked when the index manager is notified that a new // ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG. // block has been connected to the DAG.

View File

@ -22,9 +22,9 @@ var _ blockdag.IndexManager = (*Manager)(nil)
// Init initializes the enabled indexes. // Init initializes the enabled indexes.
// This is part of the blockdag.IndexManager interface. // This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(dag *blockdag.BlockDAG) error { func (m *Manager) Init(dag *blockdag.BlockDAG, databaseContext *dbaccess.DatabaseContext) error {
for _, indexer := range m.enabledIndexes { for _, indexer := range m.enabledIndexes {
if err := indexer.Init(dag); err != nil { if err := indexer.Init(dag, databaseContext); err != nil {
return err return err
} }
} }

View File

@ -3,12 +3,14 @@ package blockdag
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"time"
"github.com/kaspanet/go-secp256k1" "github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
"time"
) )
// BlockForMining returns a block with the given transactions // BlockForMining returns a block with the given transactions
@ -104,19 +106,19 @@ func (dag *BlockDAG) NextCoinbaseFromAddress(payToAddress util.Address, extraDat
// on the end of the DAG. In particular, it is one second after // on the end of the DAG. In particular, it is one second after
// the median timestamp of the last several blocks per the DAG consensus // the median timestamp of the last several blocks per the DAG consensus
// rules. // rules.
func (dag *BlockDAG) NextBlockMinimumTime() time.Time { func (dag *BlockDAG) NextBlockMinimumTime() mstime.Time {
return dag.CalcPastMedianTime().Add(time.Second) return dag.CalcPastMedianTime().Add(time.Millisecond)
} }
// NextBlockTime returns a valid block time for the // NextBlockTime returns a valid block time for the
// next block that will point to the existing DAG tips. // next block that will point to the existing DAG tips.
func (dag *BlockDAG) NextBlockTime() time.Time { func (dag *BlockDAG) NextBlockTime() mstime.Time {
// The timestamp for the block must not be before the median timestamp // The timestamp for the block must not be before the median timestamp
// of the last several blocks. Thus, choose the maximum between the // of the last several blocks. Thus, choose the maximum between the
// current time and one second after the past median time. The current // current time and one second after the past median time. The current
// timestamp is truncated to a second boundary before comparison since a // timestamp is truncated to a millisecond boundary before comparison since a
// block timestamp does not supported a precision greater than one // block timestamp does not supported a precision greater than one
// second. // millisecond.
newTimestamp := dag.Now() newTimestamp := dag.Now()
minTimestamp := dag.NextBlockMinimumTime() minTimestamp := dag.NextBlockMinimumTime()
if newTimestamp.Before(minTimestamp) { if newTimestamp.Before(minTimestamp) {

View File

@ -6,9 +6,10 @@ package blockdag
import ( import (
"fmt" "fmt"
"time"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors" "github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
@ -49,6 +50,10 @@ const (
// This is used for the case where a block is submitted through RPC. // This is used for the case where a block is submitted through RPC.
BFDisallowDelay BFDisallowDelay
// BFDisallowOrphans is set to indicate that an orphan block should be rejected.
// This is used for the case where a block is submitted through RPC.
BFDisallowOrphans
// BFNone is a convenience value to specifically indicate no flags. // BFNone is a convenience value to specifically indicate no flags.
BFNone BehaviorFlags = 0 BFNone BehaviorFlags = 0
) )
@ -151,6 +156,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
isAfterDelay := flags&BFAfterDelay == BFAfterDelay isAfterDelay := flags&BFAfterDelay == BFAfterDelay
wasBlockStored := flags&BFWasStored == BFWasStored wasBlockStored := flags&BFWasStored == BFWasStored
disallowDelay := flags&BFDisallowDelay == BFDisallowDelay disallowDelay := flags&BFDisallowDelay == BFDisallowDelay
disallowOrphans := flags&BFDisallowOrphans == BFDisallowOrphans
blockHash := block.Hash() blockHash := block.Hash()
log.Tracef("Processing block %s", blockHash) log.Tracef("Processing block %s", blockHash)
@ -199,12 +205,16 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
missingParents = append(missingParents, parentHash) missingParents = append(missingParents, parentHash)
} }
} }
if len(missingParents) > 0 && disallowOrphans {
str := fmt.Sprintf("Cannot process orphan blocks while the BFDisallowOrphans flag is raised %s", blockHash)
return false, false, ruleError(ErrOrphanBlockIsNotAllowed, str)
}
// Handle the case of a block with a valid timestamp(non-delayed) which points to a delayed block. // Handle the case of a block with a valid timestamp(non-delayed) which points to a delayed block.
delay, isParentDelayed := dag.maxDelayOfParents(missingParents) delay, isParentDelayed := dag.maxDelayOfParents(missingParents)
if isParentDelayed { if isParentDelayed {
// Add Nanosecond to ensure that parent process time will be after its child. // Add Millisecond to ensure that parent process time will be after its child.
delay += time.Nanosecond delay += time.Millisecond
err := dag.addDelayedBlock(block, delay) err := dag.addDelayedBlock(block, delay)
if err != nil { if err != nil {
return false, false, err return false, false, err
@ -221,7 +231,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
// The number K*2 was chosen since in peace times anticone is limited to K blocks, // The number K*2 was chosen since in peace times anticone is limited to K blocks,
// while some red block can make it a bit bigger, but much more than that indicates // while some red block can make it a bit bigger, but much more than that indicates
// there might be some problem with the netsync process. // there might be some problem with the netsync process.
if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 { if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.Params.K*2 {
log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash) log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
} else { } else {
log.Infof("Adding orphan block %s", blockHash) log.Infof("Adding orphan block %s", blockHash)

View File

@ -1,11 +1,12 @@
package blockdag package blockdag
import ( import (
"github.com/kaspanet/kaspad/util"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
) )
@ -89,18 +90,18 @@ func TestProcessDelayedBlocks(t *testing.T) {
} }
}() }()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp initialTime := dag1.Params.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp // Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially // one hour into the future to make delayedBlock artificially
// valid. // valid.
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour)) dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil) delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.Params.GenesisBlock.BlockHash()}, nil)
if err != nil { if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err) t.Fatalf("error in PrepareBlockForTest: %s", err)
} }
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance)*dag1.Params.TargetTimePerBlock + 5*time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay) delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck) isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
@ -177,7 +178,7 @@ func TestProcessDelayedBlocks(t *testing.T) {
t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block") t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
} }
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil) blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()}, nil)
if err != nil { if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err) t.Fatalf("error in PrepareBlockForTest: %s", err)
} }
@ -202,12 +203,15 @@ func TestProcessDelayedBlocks(t *testing.T) {
} }
// We advance the clock to the point where delayedBlock timestamp is valid. // We advance the clock to the point where delayedBlock timestamp is valid.
deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock deviationTolerance := time.Duration(dag2.TimestampDeviationTolerance) * dag2.Params.TargetTimePerBlock
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - deviationTolerance - dag2.Now().Unix() + 1 timeUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.
dag2.timeSource = newFakeTimeSource(initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second)) Add(-deviationTolerance).
Sub(dag2.Now()) +
time.Second
dag2.timeSource = newFakeTimeSource(initialTime.Add(timeUntilDelayedBlockIsValid))
blockAfterDelay, err := PrepareBlockForTest(dag2, blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()},
nil) nil)
if err != nil { if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err) t.Fatalf("error in PrepareBlockForTest: %s", err)

View File

@ -903,7 +903,7 @@ func (rt *reachabilityTree) init(dbContext dbaccess.Context) error {
if !dbaccess.IsNotFoundError(err) { if !dbaccess.IsNotFoundError(err) {
return err return err
} }
reindexRootHash = rt.dag.dagParams.GenesisHash reindexRootHash = rt.dag.Params.GenesisHash
} }
// Init the reindex root // Init the reindex root

View File

@ -126,7 +126,7 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
// Start up validation handlers that are used to asynchronously // Start up validation handlers that are used to asynchronously
// validate each transaction input. // validate each transaction input.
for i := 0; i < maxGoRoutines; i++ { for i := 0; i < maxGoRoutines; i++ {
spawn(v.validateHandler) spawn("txValidator.validateHandler", v.validateHandler)
} }
// Validate each of the inputs. The quit channel is closed when any // Validate each of the inputs. The quit channel is closed when any

View File

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -75,8 +76,8 @@ func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
} }
// fetchSubnetwork returns a registered subnetwork. // fetchSubnetwork returns a registered subnetwork.
func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) { func (dag *BlockDAG) fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dbaccess.NoTx(), subnetworkID) serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dag.databaseContext, subnetworkID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -91,8 +92,8 @@ func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, erro
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not // GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error. // exist this method returns an error.
func GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) { func (dag *BlockDAG) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := fetchSubnetwork(subnetworkID) sNet, err := dag.fetchSubnetwork(subnetworkID)
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@ -1,6 +1,9 @@
package blockdag package blockdag
import "time" import (
"github.com/kaspanet/kaspad/util/mstime"
"time"
)
const syncRateWindowDuration = 15 * time.Minute const syncRateWindowDuration = 15 * time.Minute
@ -8,7 +11,7 @@ const syncRateWindowDuration = 15 * time.Minute
// //
// This function MUST be called with the DAG state lock held (for writes). // This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) addBlockProcessingTimestamp() { func (dag *BlockDAG) addBlockProcessingTimestamp() {
now := time.Now() now := mstime.Now()
dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now) dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
} }
@ -21,8 +24,8 @@ func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamp
dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow() dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
} }
func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []time.Time { func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []mstime.Time {
minTime := time.Now().Add(-syncRateWindowDuration) minTime := mstime.Now().Add(-syncRateWindowDuration)
windowStartIndex := len(dag.recentBlockProcessingTimestamps) windowStartIndex := len(dag.recentBlockProcessingTimestamps)
for i, processTime := range dag.recentBlockProcessingTimestamps { for i, processTime := range dag.recentBlockProcessingTimestamps {
if processTime.After(minTime) { if processTime.After(minTime) {
@ -49,9 +52,9 @@ func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
return false return false
} }
return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation return dag.syncRate() < 1/dag.Params.TargetTimePerBlock.Seconds()*maxDeviation
} }
func (dag *BlockDAG) uptime() time.Duration { func (dag *BlockDAG) uptime() time.Duration {
return time.Now().Sub(dag.startTime) return mstime.Now().Sub(dag.startTime)
} }

View File

@ -5,11 +5,6 @@ package blockdag
import ( import (
"compress/bzip2" "compress/bzip2"
"encoding/binary" "encoding/binary"
"github.com/kaspanet/kaspad/database/ffldb/ldb"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -19,6 +14,12 @@ import (
"sync" "sync"
"testing" "testing"
"github.com/kaspanet/kaspad/database/ffldb/ldb"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/txscript"
@ -49,9 +50,9 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
// overwrite `spawn` to count the number of running goroutines // overwrite `spawn` to count the number of running goroutines
spawnWaitGroup := sync.WaitGroup{} spawnWaitGroup := sync.WaitGroup{}
realSpawn := spawn realSpawn := spawn
spawn = func(f func()) { spawn = func(name string, f func()) {
spawnWaitGroup.Add(1) spawnWaitGroup.Add(1)
realSpawn(func() { realSpawn(name, func() {
f() f()
spawnWaitGroup.Done() spawnWaitGroup.Done()
}) })
@ -75,17 +76,19 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
dbPath := filepath.Join(tmpDir, dbName) dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath) _ = os.RemoveAll(dbPath)
err = dbaccess.Open(dbPath) databaseContext, err := dbaccess.New(dbPath)
if err != nil { if err != nil {
return nil, nil, errors.Errorf("error creating db: %s", err) return nil, nil, errors.Errorf("error creating db: %s", err)
} }
config.DatabaseContext = databaseContext
// Setup a teardown function for cleaning up. This function is // Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing. // returned to the caller to be invoked when it is done testing.
teardown = func() { teardown = func() {
spawnWaitGroup.Wait() spawnWaitGroup.Wait()
spawn = realSpawn spawn = realSpawn
dbaccess.Close() databaseContext.Close()
ldb.Options = originalLDBOptions ldb.Options = originalLDBOptions
os.RemoveAll(dbPath) os.RemoveAll(dbPath)
} }
@ -249,7 +252,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
oldVirtual := SetVirtualForTest(dag, newVirtual) oldVirtual := SetVirtualForTest(dag, newVirtual)
defer SetVirtualForTest(dag, oldVirtual) defer SetVirtualForTest(dag, oldVirtual)
OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix) OpTrueAddr, err := opTrueAddress(dag.Params.Prefix)
if err != nil { if err != nil {
return nil, err return nil, err
} }

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -157,7 +157,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// The state is simply defined if the start time hasn't been // The state is simply defined if the start time hasn't been
// been reached yet. // been reached yet.
if uint64(medianTime.Unix()) < checker.BeginTime() { if uint64(medianTime.UnixMilliseconds()) < checker.BeginTime() {
cache.Update(prevNode.hash, ThresholdDefined) cache.Update(prevNode.hash, ThresholdDefined)
break break
} }
@ -194,7 +194,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// The deployment of the rule change fails if it expires // The deployment of the rule change fails if it expires
// before it is accepted and locked in. // before it is accepted and locked in.
medianTime := prevNode.PastMedianTime(dag) medianTime := prevNode.PastMedianTime(dag)
medianTimeUnix := uint64(medianTime.Unix()) medianTimeUnix := uint64(medianTime.UnixMilliseconds())
if medianTimeUnix >= checker.EndTime() { if medianTimeUnix >= checker.EndTime() {
state = ThresholdFailed state = ThresholdFailed
break break
@ -211,7 +211,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// The deployment of the rule change fails if it expires // The deployment of the rule change fails if it expires
// before it is accepted and locked in. // before it is accepted and locked in.
medianTime := prevNode.PastMedianTime(dag) medianTime := prevNode.PastMedianTime(dag)
if uint64(medianTime.Unix()) >= checker.EndTime() { if uint64(medianTime.UnixMilliseconds()) >= checker.EndTime() {
state = ThresholdFailed state = ThresholdFailed
break break
} }
@ -297,11 +297,11 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
// //
// This function MUST be called with the DAG state lock held (for writes). // This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) { func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID > uint32(len(dag.dagParams.Deployments)) { if deploymentID > uint32(len(dag.Params.Deployments)) {
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID) return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
} }
deployment := &dag.dagParams.Deployments[deploymentID] deployment := &dag.Params.Deployments[deploymentID]
checker := deploymentChecker{deployment: deployment, dag: dag} checker := deploymentChecker{deployment: deployment, dag: dag}
cache := &dag.deploymentCaches[deploymentID] cache := &dag.deploymentCaches[deploymentID]
@ -325,8 +325,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
return err return err
} }
} }
for id := 0; id < len(dag.dagParams.Deployments); id++ { for id := 0; id < len(dag.Params.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id] deployment := &dag.Params.Deployments[id]
cache := &dag.deploymentCaches[id] cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, dag: dag} checker := deploymentChecker{deployment: deployment, dag: dag}
_, err := dag.thresholdState(prevNode, checker, cache) _, err := dag.thresholdState(prevNode, checker, cache)

View File

@ -1,22 +1,22 @@
package blockdag package blockdag
import ( import (
"time" "github.com/kaspanet/kaspad/util/mstime"
) )
// TimeSource is the interface to access time. // TimeSource is the interface to access time.
type TimeSource interface { type TimeSource interface {
// Now returns the current time. // Now returns the current time.
Now() time.Time Now() mstime.Time
} }
// timeSource provides an implementation of the TimeSource interface // timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time. // that simply returns the current local time.
type timeSource struct{} type timeSource struct{}
// Now returns the current local time, with one second precision. // Now returns the current local time, with one millisecond precision.
func (m *timeSource) Now() time.Time { func (m *timeSource) Now() mstime.Time {
return time.Unix(time.Now().Unix(), 0) return mstime.Now()
} }
// NewTimeSource returns a new instance of a TimeSource // NewTimeSource returns a new instance of a TimeSource

View File

@ -2,6 +2,7 @@ package blockdag
import ( import (
"bytes" "bytes"
"github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks" "github.com/kaspanet/kaspad/util/locks"
@ -116,7 +117,7 @@ func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, er
} }
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) { func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(dbaccess.NoTx(), hash) serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(diffStore.dag.databaseContext, hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -66,7 +66,7 @@ func TestUTXODiffStore(t *testing.T) {
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded // Flush changes to db, delete them from the dag.utxoDiffStore.loaded
// map, and check if the diff data is re-fetched from the database. // map, and check if the diff data is re-fetched from the database.
dbTx, err := dbaccess.NewTx() dbTx, err := dag.databaseContext.NewTx()
if err != nil { if err != nil {
t.Fatalf("Failed to open database transaction: %s", err) t.Fatalf("Failed to open database transaction: %s", err)
} }

View File

@ -6,6 +6,7 @@ package blockdag
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/util/mstime"
"math" "math"
"sort" "sort"
"time" "time"
@ -56,12 +57,12 @@ func isNullOutpoint(outpoint *wire.Outpoint) bool {
// met, meaning that all the inputs of a given transaction have reached a // met, meaning that all the inputs of a given transaction have reached a
// blue score or time sufficient for their relative lock-time maturity. // blue score or time sufficient for their relative lock-time maturity.
func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64, func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
medianTimePast time.Time) bool { medianTimePast mstime.Time) bool {
// If either the seconds, or blue score relative-lock time has not yet // If either the milliseconds, or blue score relative-lock time has not yet
// reached, then the transaction is not yet mature according to its // reached, then the transaction is not yet mature according to its
// sequence locks. // sequence locks.
if sequenceLock.Seconds >= medianTimePast.Unix() || if sequenceLock.Milliseconds >= medianTimePast.UnixMilliseconds() ||
sequenceLock.BlockBlueScore >= int64(blockBlueScore) { sequenceLock.BlockBlueScore >= int64(blockBlueScore) {
return false return false
} }
@ -70,7 +71,7 @@ func SequenceLockActive(sequenceLock *SequenceLock, blockBlueScore uint64,
} }
// IsFinalizedTransaction determines whether or not a transaction is finalized. // IsFinalizedTransaction determines whether or not a transaction is finalized.
func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime time.Time) bool { func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime mstime.Time) bool {
msgTx := tx.MsgTx() msgTx := tx.MsgTx()
// Lock time of zero means the transaction is finalized. // Lock time of zero means the transaction is finalized.
@ -87,7 +88,7 @@ func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime time.T
if lockTime < txscript.LockTimeThreshold { if lockTime < txscript.LockTimeThreshold {
blockTimeOrBlueScore = int64(blockBlueScore) blockTimeOrBlueScore = int64(blockBlueScore)
} else { } else {
blockTimeOrBlueScore = blockTime.Unix() blockTimeOrBlueScore = blockTime.UnixMilliseconds()
} }
if int64(lockTime) < blockTimeOrBlueScore { if int64(lockTime) < blockTimeOrBlueScore {
return true return true
@ -133,15 +134,6 @@ func CheckTransactionSanity(tx *util.Tx, subnetworkID *subnetworkid.SubnetworkID
return ruleError(ErrNoTxInputs, "transaction has no inputs") return ruleError(ErrNoTxInputs, "transaction has no inputs")
} }
// A transaction must not exceed the maximum allowed block mass when
// serialized.
serializedTxSize := msgTx.SerializeSize()
if serializedTxSize*MassPerTxByte > wire.MaxMassPerTx {
str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, wire.MaxMassPerBlock)
return ruleError(ErrTxMassTooHigh, str)
}
// Ensure the transaction amounts are in range. Each transaction // Ensure the transaction amounts are in range. Each transaction
// output must not be negative or more than the max allowed per // output must not be negative or more than the max allowed per
// transaction. Also, the total of all outputs must abide by the same // transaction. Also, the total of all outputs must abide by the same
@ -273,9 +265,9 @@ func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFl
} }
// The target difficulty must be less than the maximum allowed. // The target difficulty must be less than the maximum allowed.
if target.Cmp(dag.dagParams.PowMax) > 0 { if target.Cmp(dag.Params.PowMax) > 0 {
str := fmt.Sprintf("block target difficulty of %064x is "+ str := fmt.Sprintf("block target difficulty of %064x is "+
"higher than max of %064x", target, dag.dagParams.PowMax) "higher than max of %064x", target, dag.Params.PowMax)
return ruleError(ErrUnexpectedDifficulty, str) return ruleError(ErrUnexpectedDifficulty, str)
} }
@ -411,7 +403,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla
} }
if len(header.ParentHashes) == 0 { if len(header.ParentHashes) == 0 {
if !header.BlockHash().IsEqual(dag.dagParams.GenesisHash) { if !header.BlockHash().IsEqual(dag.Params.GenesisHash) {
return 0, ruleError(ErrNoParents, "block has no parents") return 0, ruleError(ErrNoParents, "block has no parents")
} }
} else { } else {
@ -421,23 +413,11 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla
} }
} }
// A block timestamp must not have a greater precision than one second.
// This check is necessary because Go time.Time values support
// nanosecond precision whereas the consensus rules only apply to
// seconds and it's much nicer to deal with standard Go time values
// instead of converting to seconds everywhere.
if !header.Timestamp.Equal(time.Unix(header.Timestamp.Unix(), 0)) {
str := fmt.Sprintf("block timestamp of %s has a higher "+
"precision than one second", header.Timestamp)
return 0, ruleError(ErrInvalidTime, str)
}
// Ensure the block time is not too far in the future. If it's too far, return // Ensure the block time is not too far in the future. If it's too far, return
// the duration of time that should be waited before the block becomes valid. // the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the // This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid). // header as delayed (and valid).
maxTimestamp := dag.Now().Add(time.Second * maxTimestamp := dag.Now().Add(time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock)
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) { if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil return header.Timestamp.Sub(maxTimestamp), nil
} }
@ -573,7 +553,7 @@ func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error {
func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error { func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error {
// Disallow non-native/coinbase subnetworks in networks that don't allow them // Disallow non-native/coinbase subnetworks in networks that don't allow them
if !dag.dagParams.EnableNonNativeSubnetworks { if !dag.Params.EnableNonNativeSubnetworks {
transactions := block.Transactions() transactions := block.Transactions()
for _, tx := range transactions { for _, tx := range transactions {
if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) || if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
@ -958,7 +938,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
for _, tx := range transactions { for _, tx := range transactions {
txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO, txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO,
dag.dagParams, fastAdd) dag.Params, fastAdd)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -10,6 +10,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
@ -22,37 +24,37 @@ import (
// TestSequenceLocksActive tests the SequenceLockActive function to ensure it // TestSequenceLocksActive tests the SequenceLockActive function to ensure it
// works as expected in all possible combinations/scenarios. // works as expected in all possible combinations/scenarios.
func TestSequenceLocksActive(t *testing.T) { func TestSequenceLocksActive(t *testing.T) {
seqLock := func(h int64, s int64) *SequenceLock { seqLock := func(blueScore int64, milliseconds int64) *SequenceLock {
return &SequenceLock{ return &SequenceLock{
Seconds: s, Milliseconds: milliseconds,
BlockBlueScore: h, BlockBlueScore: blueScore,
} }
} }
tests := []struct { tests := []struct {
seqLock *SequenceLock seqLock *SequenceLock
blockBlueScore uint64 blockBlueScore uint64
mtp time.Time mtp mstime.Time
want bool want bool
}{ }{
// Block based sequence lock with equal block blue score. // Block based sequence lock with equal block blue score.
{seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: time.Unix(9, 0), want: true}, {seqLock: seqLock(1000, -1), blockBlueScore: 1001, mtp: mstime.UnixMilliseconds(9), want: true},
// Time based sequence lock with mtp past the absolute time. // Time based sequence lock with mtp past the absolute time.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(31, 0), want: true}, {seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(31), want: true},
// Block based sequence lock with current blue score below seq lock block blue score. // Block based sequence lock with current blue score below seq lock block blue score.
{seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: time.Unix(9, 0), want: false}, {seqLock: seqLock(1000, -1), blockBlueScore: 90, mtp: mstime.UnixMilliseconds(9), want: false},
// Time based sequence lock with current time before lock time. // Time based sequence lock with current time before lock time.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(29, 0), want: false}, {seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(29), want: false},
// Block based sequence lock at the same blue score, so shouldn't yet be active. // Block based sequence lock at the same blue score, so shouldn't yet be active.
{seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: time.Unix(9, 0), want: false}, {seqLock: seqLock(1000, -1), blockBlueScore: 1000, mtp: mstime.UnixMilliseconds(9), want: false},
// Time based sequence lock with current time equal to lock time, so shouldn't yet be active. // Time based sequence lock with current time equal to lock time, so shouldn't yet be active.
{seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: time.Unix(30, 0), want: false}, {seqLock: seqLock(-1, 30), blockBlueScore: 2, mtp: mstime.UnixMilliseconds(30), want: false},
} }
t.Logf("Running %d sequence locks tests", len(tests)) t.Logf("Running %d sequence locks tests", len(tests))
@ -164,7 +166,7 @@ func TestCheckBlockSanity(t *testing.T) {
return return
} }
defer teardownFunc() defer teardownFunc()
dag.timeSource = newFakeTimeSource(time.Now()) dag.timeSource = newFakeTimeSource(mstime.Now())
block := util.NewBlock(&Block100000) block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 { if len(block.Transactions()) < 3 {
@ -194,18 +196,6 @@ func TestCheckBlockSanity(t *testing.T) {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay) t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
} }
// Ensure a block that has a timestamp with a precision higher than one
// second fails.
timestamp := block.MsgBlock().Header.Timestamp
block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond)
delay, err = dag.checkBlockSanity(block, BFNone)
if err == nil {
t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
}
if delay != 0 {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
}
var invalidParentsOrderBlock = wire.MsgBlock{ var invalidParentsOrderBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
Version: 0x10000000, Version: 0x10000000,
@ -241,7 +231,7 @@ func TestCheckBlockSanity(t *testing.T) {
0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25, 0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25,
0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2, 0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2,
}, },
Timestamp: time.Unix(0x5cd18053, 0), Timestamp: mstime.UnixMilliseconds(0x5cd18053000),
Bits: 0x207fffff, Bits: 0x207fffff,
Nonce: 0x1, Nonce: 0x1,
}, },
@ -489,7 +479,7 @@ func TestCheckBlockSanity(t *testing.T) {
blockInTheFuture := Block100000 blockInTheFuture := Block100000
expectedDelay := 10 * time.Second expectedDelay := 10 * time.Second
deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second deviationTolerance := time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock
blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay) blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck) delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
if err != nil { if err != nil {
@ -565,9 +555,9 @@ func TestValidateParents(t *testing.T) {
} }
defer teardownFunc() defer teardownFunc()
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a) b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a) aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b) bNode := nodeByMsgBlock(t, dag, b)
@ -612,7 +602,6 @@ func TestCheckTransactionSanity(t *testing.T) {
{"good one", 1, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil}, {"good one", 1, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
{"no inputs", 0, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrNoTxInputs, "")}, {"no inputs", 0, 1, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrNoTxInputs, "")},
{"no outputs", 1, 0, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil}, {"no outputs", 1, 0, 1, *subnetworkid.SubnetworkIDNative, nil, nil, nil},
{"too massive", 1, 1000000, 1, *subnetworkid.SubnetworkIDNative, nil, nil, ruleError(ErrTxMassTooHigh, "")},
{"too much sompi in one output", 1, 1, util.MaxSompi + 1, {"too much sompi in one output", 1, 1, util.MaxSompi + 1,
*subnetworkid.SubnetworkIDNative, *subnetworkid.SubnetworkIDNative,
nil, nil,
@ -740,9 +729,9 @@ var Block100000 = wire.MsgBlock{
0x3c, 0xb1, 0x16, 0x8f, 0x5f, 0x6b, 0x45, 0x87, 0x3c, 0xb1, 0x16, 0x8f, 0x5f, 0x6b, 0x45, 0x87,
}, },
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5cdac4b1, 0), Timestamp: mstime.UnixMilliseconds(0x17305aa654a),
Bits: 0x207fffff, Bits: 0x207fffff,
Nonce: 0x00000001, Nonce: 1,
}, },
Transactions: []*wire.MsgTx{ Transactions: []*wire.MsgTx{
{ {
@ -1047,9 +1036,9 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90, 0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90,
0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e, 0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e,
}, },
Timestamp: time.Unix(0x5cd16eaa, 0), Timestamp: mstime.UnixMilliseconds(0x5cd16eaa000),
Bits: 0x207fffff, Bits: 0x207fffff,
Nonce: 0x0, Nonce: 1,
}, },
Transactions: []*wire.MsgTx{ Transactions: []*wire.MsgTx{
{ {

View File

@ -78,7 +78,7 @@ func (c bitConditionChecker) EndTime() uint64 {
// //
// This is part of the thresholdConditionChecker interface implementation. // This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 { func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
return c.dag.dagParams.RuleChangeActivationThreshold return c.dag.Params.RuleChangeActivationThreshold
} }
// MinerConfirmationWindow is the number of blocks in each threshold state // MinerConfirmationWindow is the number of blocks in each threshold state
@ -89,7 +89,7 @@ func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
// //
// This is part of the thresholdConditionChecker interface implementation. // This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint64 { func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
return c.dag.dagParams.MinerConfirmationWindow return c.dag.Params.MinerConfirmationWindow
} }
// Condition returns true when the specific bit associated with the checker is // Condition returns true when the specific bit associated with the checker is
@ -159,7 +159,7 @@ func (c deploymentChecker) EndTime() uint64 {
// //
// This is part of the thresholdConditionChecker interface implementation. // This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 { func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
return c.dag.dagParams.RuleChangeActivationThreshold return c.dag.Params.RuleChangeActivationThreshold
} }
// MinerConfirmationWindow is the number of blocks in each threshold state // MinerConfirmationWindow is the number of blocks in each threshold state
@ -170,7 +170,7 @@ func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
// //
// This is part of the thresholdConditionChecker interface implementation. // This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint64 { func (c deploymentChecker) MinerConfirmationWindow() uint64 {
return c.dag.dagParams.MinerConfirmationWindow return c.dag.Params.MinerConfirmationWindow
} }
// Condition returns true when the specific bit defined by the deployment // Condition returns true when the specific bit defined by the deployment
@ -198,8 +198,8 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
// that is either in the process of being voted on, or locked in for the // that is either in the process of being voted on, or locked in for the
// activation at the next threshold window change. // activation at the next threshold window change.
expectedVersion := uint32(vbTopBits) expectedVersion := uint32(vbTopBits)
for id := 0; id < len(dag.dagParams.Deployments); id++ { for id := 0; id < len(dag.Params.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id] deployment := &dag.Params.Deployments[id]
cache := &dag.deploymentCaches[id] cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, dag: dag} checker := deploymentChecker{deployment: deployment, dag: dag}
state, err := dag.thresholdState(prevNode, checker, cache) state, err := dag.thresholdState(prevNode, checker, cache)

View File

@ -21,7 +21,7 @@ const (
var ( var (
cfg *ConfigFlags cfg *ConfigFlags
log *logs.Logger log *logs.Logger
spawn func(func()) spawn func(string, func())
) )
// realMain is the real main function for the utility. It is necessary to work // realMain is the real main function for the utility. It is necessary to work

View File

@ -7,6 +7,7 @@ package main
import ( import (
"encoding/binary" "encoding/binary"
"github.com/kaspanet/kaspad/blockdag/indexers" "github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors" "github.com/pkg/errors"
"io" "io"
"sync" "sync"
@ -39,8 +40,8 @@ type blockImporter struct {
receivedLogBlocks int64 receivedLogBlocks int64
receivedLogTx int64 receivedLogTx int64
lastHeight int64 lastHeight int64
lastBlockTime time.Time lastBlockTime mstime.Time
lastLogTime time.Time lastLogTime mstime.Time
} }
// readBlock reads the next block from the input file. // readBlock reads the next block from the input file.
@ -170,7 +171,7 @@ out:
func (bi *blockImporter) logProgress() { func (bi *blockImporter) logProgress() {
bi.receivedLogBlocks++ bi.receivedLogBlocks++
now := time.Now() now := mstime.Now()
duration := now.Sub(bi.lastLogTime) duration := now.Sub(bi.lastLogTime)
if duration < time.Second*time.Duration(cfg.Progress) { if duration < time.Second*time.Duration(cfg.Progress) {
return return
@ -264,12 +265,12 @@ func (bi *blockImporter) Import() chan *importResults {
// Start up the read and process handling goroutines. This setup allows // Start up the read and process handling goroutines. This setup allows
// blocks to be read from disk in parallel while being processed. // blocks to be read from disk in parallel while being processed.
bi.wg.Add(2) bi.wg.Add(2)
spawn(bi.readHandler) spawn("blockImporter.readHandler", bi.readHandler)
spawn(bi.processHandler) spawn("blockImporter.processHandler", bi.processHandler)
// Wait for the import to finish in a separate goroutine and signal // Wait for the import to finish in a separate goroutine and signal
// the status handler when done. // the status handler when done.
spawn(func() { spawn("blockImporter.sendToDoneChan", func() {
bi.wg.Wait() bi.wg.Wait()
bi.doneChan <- true bi.doneChan <- true
}) })
@ -277,7 +278,7 @@ func (bi *blockImporter) Import() chan *importResults {
// Start the status handler and return the result channel that it will // Start the status handler and return the result channel that it will
// send the results on when the import is done. // send the results on when the import is done.
resultChan := make(chan *importResults) resultChan := make(chan *importResults)
spawn(func() { spawn("blockImporter.statusHandler", func() {
bi.statusHandler(resultChan) bi.statusHandler(resultChan)
}) })
return resultChan return resultChan
@ -315,6 +316,6 @@ func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
errChan: make(chan error), errChan: make(chan error),
quit: make(chan struct{}), quit: make(chan struct{}),
dag: dag, dag: dag,
lastLogTime: time.Now(), lastLogTime: mstime.Now(),
}, nil }, nil
} }

View File

@ -16,7 +16,7 @@ import (
"strings" "strings"
"github.com/jessevdk/go-flags" "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/rpcmodel" "github.com/kaspanet/kaspad/rpc/model"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
) )
@ -24,7 +24,7 @@ const (
// unusableFlags are the command usage flags which this utility are not // unusableFlags are the command usage flags which this utility are not
// able to use. In particular it doesn't support websockets and // able to use. In particular it doesn't support websockets and
// consequently notifications. // consequently notifications.
unusableFlags = rpcmodel.UFWebsocketOnly | rpcmodel.UFNotification unusableFlags = model.UFWebsocketOnly | model.UFNotification
) )
var ( var (
@ -45,10 +45,10 @@ func listCommands() {
) )
// Get a list of registered commands and categorize and filter them. // Get a list of registered commands and categorize and filter them.
cmdMethods := rpcmodel.RegisteredCmdMethods() cmdMethods := model.RegisteredCmdMethods()
categorized := make([][]string, numCategories) categorized := make([][]string, numCategories)
for _, method := range cmdMethods { for _, method := range cmdMethods {
flags, err := rpcmodel.MethodUsageFlags(method) flags, err := model.MethodUsageFlags(method)
if err != nil { if err != nil {
// This should never happen since the method was just // This should never happen since the method was just
// returned from the package, but be safe. // returned from the package, but be safe.
@ -60,7 +60,7 @@ func listCommands() {
continue continue
} }
usage, err := rpcmodel.MethodUsageText(method) usage, err := model.MethodUsageText(method)
if err != nil { if err != nil {
// This should never happen since the method was just // This should never happen since the method was just
// returned from the package, but be safe. // returned from the package, but be safe.

View File

@ -11,7 +11,7 @@ import (
"net/http" "net/http"
"github.com/btcsuite/go-socks/socks" "github.com/btcsuite/go-socks/socks"
"github.com/kaspanet/kaspad/rpcmodel" "github.com/kaspanet/kaspad/rpc/model"
) )
// newHTTPClient returns a new HTTP client that is configured according to the // newHTTPClient returns a new HTTP client that is configured according to the
@ -117,7 +117,7 @@ func sendPostRequest(marshalledJSON []byte, cfg *ConfigFlags) ([]byte, error) {
} }
// Unmarshal the response. // Unmarshal the response.
var resp rpcmodel.Response var resp model.Response
if err := json.Unmarshal(respBytes, &resp); err != nil { if err := json.Unmarshal(respBytes, &resp); err != nil {
return nil, err return nil, err
} }

View File

@ -11,7 +11,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/kaspanet/kaspad/rpcmodel" "github.com/kaspanet/kaspad/rpc/model"
) )
const ( const (
@ -21,7 +21,7 @@ const (
// commandUsage display the usage for a specific command. // commandUsage display the usage for a specific command.
func commandUsage(method string) { func commandUsage(method string) {
usage, err := rpcmodel.MethodUsageText(method) usage, err := model.MethodUsageText(method)
if err != nil { if err != nil {
// This should never happen since the method was already checked // This should never happen since the method was already checked
// before calling this function, but be safe. // before calling this function, but be safe.
@ -60,7 +60,7 @@ func main() {
// Ensure the specified method identifies a valid registered command and // Ensure the specified method identifies a valid registered command and
// is one of the usable types. // is one of the usable types.
method := args[0] method := args[0]
usageFlags, err := rpcmodel.MethodUsageFlags(method) usageFlags, err := model.MethodUsageFlags(method)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method) fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method)
fmt.Fprintln(os.Stderr, listCmdMessage) fmt.Fprintln(os.Stderr, listCmdMessage)
@ -105,13 +105,13 @@ func main() {
// Attempt to create the appropriate command using the arguments // Attempt to create the appropriate command using the arguments
// provided by the user. // provided by the user.
cmd, err := rpcmodel.NewCommand(method, params...) cmd, err := model.NewCommand(method, params...)
if err != nil { if err != nil {
// Show the error along with its error code when it's a // Show the error along with its error code when it's a
// rpcmodel.Error as it reallistcally will always be since the // model.Error as it reallistcally will always be since the
// NewCommand function is only supposed to return errors of that // NewCommand function is only supposed to return errors of that
// type. // type.
var rpcModelErr rpcmodel.Error var rpcModelErr model.Error
if ok := errors.As(err, &rpcModelErr); ok { if ok := errors.As(err, &rpcModelErr); ok {
fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n", fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n",
method, err, rpcModelErr.ErrorCode) method, err, rpcModelErr.ErrorCode)
@ -119,7 +119,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
// The error is not a rpcmodel.Error and this really should not // The error is not a model.Error and this really should not
// happen. Nevertheless, fallback to just showing the error // happen. Nevertheless, fallback to just showing the error
// if it should happen due to a bug in the package. // if it should happen due to a bug in the package.
fmt.Fprintf(os.Stderr, "%s error: %s\n", method, err) fmt.Fprintf(os.Stderr, "%s error: %s\n", method, err)
@ -129,7 +129,7 @@ func main() {
// Marshal the command into a JSON-RPC byte slice in preparation for // Marshal the command into a JSON-RPC byte slice in preparation for
// sending it to the RPC server. // sending it to the RPC server.
marshalledJSON, err := rpcmodel.MarshalCommand(1, cmd) marshalledJSON, err := model.MarshalCommand(1, cmd)
if err != nil { if err != nil {
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
os.Exit(1) os.Exit(1)

View File

@ -1,7 +1,7 @@
package main package main
import ( import (
"github.com/kaspanet/kaspad/rpcclient" "github.com/kaspanet/kaspad/rpc/client"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -10,30 +10,30 @@ import (
) )
type minerClient struct { type minerClient struct {
*rpcclient.Client *client.Client
onBlockAdded chan struct{} onBlockAdded chan struct{}
} }
func newMinerClient(connCfg *rpcclient.ConnConfig) (*minerClient, error) { func newMinerClient(connCfg *client.ConnConfig) (*minerClient, error) {
client := &minerClient{ minerClient := &minerClient{
onBlockAdded: make(chan struct{}, 1), onBlockAdded: make(chan struct{}, 1),
} }
notificationHandlers := &rpcclient.NotificationHandlers{ notificationHandlers := &client.NotificationHandlers{
OnFilteredBlockAdded: func(_ uint64, header *wire.BlockHeader, OnFilteredBlockAdded: func(_ uint64, header *wire.BlockHeader,
txs []*util.Tx) { txs []*util.Tx) {
client.onBlockAdded <- struct{}{} minerClient.onBlockAdded <- struct{}{}
}, },
} }
var err error var err error
client.Client, err = rpcclient.New(connCfg, notificationHandlers) minerClient.Client, err = client.New(connCfg, notificationHandlers)
if err != nil { if err != nil {
return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err) return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
} }
if err = client.NotifyBlocks(); err != nil { if err = minerClient.NotifyBlocks(); err != nil {
return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err) return nil, errors.Wrapf(err, "error while registering minerClient %s for block notifications", minerClient.Host())
} }
return client, nil return minerClient, nil
} }
func connectToServer(cfg *configFlags) (*minerClient, error) { func connectToServer(cfg *configFlags) (*minerClient, error) {
@ -47,7 +47,7 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
return nil, err return nil, err
} }
connCfg := &rpcclient.ConnConfig{ connCfg := &client.ConnConfig{
Host: rpcAddr, Host: rpcAddr,
Endpoint: "ws", Endpoint: "ws",
User: cfg.RPCUser, User: cfg.RPCUser,

View File

@ -3,7 +3,7 @@ package main
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/logs" "github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/rpcclient" "github.com/kaspanet/kaspad/rpc/client"
"github.com/kaspanet/kaspad/util/panics" "github.com/kaspanet/kaspad/util/panics"
"os" "os"
) )
@ -28,5 +28,5 @@ func initLog(logFile, errLogFile string) {
} }
func enableRPCLogging() { func enableRPCLogging() {
rpcclient.UseLogger(backendLog, logs.LevelTrace) client.UseLogger(backendLog, logs.LevelTrace)
} }

View File

@ -17,7 +17,7 @@ import (
) )
func main() { func main() {
defer panics.HandlePanic(log, nil) defer panics.HandlePanic(log, "MAIN", nil)
interrupt := signal.InterruptListener() interrupt := signal.InterruptListener()
cfg, err := parseConfig() cfg, err := parseConfig()
@ -50,7 +50,7 @@ func main() {
} }
doneChan := make(chan struct{}) doneChan := make(chan struct{})
spawn(func() { spawn("mineLoop", func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced, miningAddr) err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced, miningAddr)
if err != nil { if err != nil {
panic(errors.Wrap(err, "error in mine loop")) panic(errors.Wrap(err, "error in mine loop"))

View File

@ -1,23 +1,17 @@
package main package main
import ( import (
"encoding/hex"
nativeerrors "errors" nativeerrors "errors"
"math/rand" "math/rand"
"strconv"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/kaspanet/kaspad/rpcclient" clientpkg "github.com/kaspanet/kaspad/rpc/client"
"github.com/pkg/errors" "github.com/kaspanet/kaspad/rpc/model"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire" "github.com/pkg/errors"
) )
var random = rand.New(rand.NewSource(time.Now().UnixNano())) var random = rand.New(rand.NewSource(time.Now().UnixNano()))
@ -33,7 +27,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, min
templateStopChan := make(chan struct{}) templateStopChan := make(chan struct{})
doneChan := make(chan struct{}) doneChan := make(chan struct{})
spawn(func() { spawn("mineLoop-internalLoop", func() {
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ { for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *util.Block) foundBlock := make(chan *util.Block)
@ -41,7 +35,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, min
block := <-foundBlock block := <-foundBlock
templateStopChan <- struct{}{} templateStopChan <- struct{}{}
wg.Add(1) wg.Add(1)
spawn(func() { spawn("mineLoop-handleFoundBlock", func() {
if blockDelay != 0 { if blockDelay != 0 {
time.Sleep(time.Duration(blockDelay) * time.Millisecond) time.Sleep(time.Duration(blockDelay) * time.Millisecond)
} }
@ -67,7 +61,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, min
} }
func logHashRate() { func logHashRate() {
spawn(func() { spawn("logHashRate", func() {
lastCheck := time.Now() lastCheck := time.Now()
for range time.Tick(logHashRateInterval) { for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried currentHashesTried := hashesTried
@ -85,11 +79,11 @@ func logHashRate() {
func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool, func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool,
templateStopChan chan struct{}, errChan chan error) { templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult) newTemplateChan := make(chan *model.GetBlockTemplateResult)
spawn(func() { spawn("templatesLoop", func() {
templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan) templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan)
}) })
spawn(func() { spawn("solveLoop", func() {
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan) solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan)
}) })
} }
@ -97,64 +91,18 @@ func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan
func handleFoundBlock(client *minerClient, block *util.Block) error { func handleFoundBlock(client *minerClient, block *util.Block) error {
log.Infof("Found block %s with parents %s. Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host()) log.Infof("Found block %s with parents %s. Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host())
err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{}) err := client.SubmitBlock(block, &model.SubmitBlockOptions{})
if err != nil { if err != nil {
return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err) return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err)
} }
return nil return nil
} }
func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
// parse parent hashes
parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
for i, parentHash := range template.ParentHashes {
hash, err := daghash.NewHashFromStr(parentHash)
if err != nil {
return nil, errors.Errorf("Error decoding hash %s: %s", parentHash, err)
}
parentHashes[i] = hash
}
// parse Bits
bitsUint64, err := strconv.ParseUint(template.Bits, 16, 32)
if err != nil {
return nil, errors.Errorf("Error decoding bits %s: %s", template.Bits, err)
}
bits := uint32(bitsUint64)
// parseAcceptedIDMerkleRoot
acceptedIDMerkleRoot, err := daghash.NewHashFromStr(template.AcceptedIDMerkleRoot)
if err != nil {
return nil, errors.Errorf("Error parsing acceptedIDMerkleRoot: %s", err)
}
utxoCommitment, err := daghash.NewHashFromStr(template.UTXOCommitment)
if err != nil {
return nil, errors.Errorf("Error parsing utxoCommitment: %s", err)
}
// parse rest of block
msgBlock := wire.NewMsgBlock(
wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
acceptedIDMerkleRoot, utxoCommitment, bits, 0))
for i, txResult := range template.Transactions {
reader := hex.NewDecoder(strings.NewReader(txResult.Data))
tx := &wire.MsgTx{}
if err := tx.KaspaDecode(reader, 0); err != nil {
return nil, errors.Errorf("Error decoding tx #%d: %s", i, err)
}
msgBlock.AddTransaction(tx)
}
block := util.NewBlock(msgBlock)
msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
return block, nil
}
func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) { func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) {
msgBlock := block.MsgBlock() msgBlock := block.MsgBlock()
targetDifficulty := util.CompactToBig(msgBlock.Header.Bits) targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)
initialNonce := random.Uint64() initialNonce := random.Uint64()
for i := random.Uint64(); i != initialNonce-1; i++ { for i := initialNonce; i != initialNonce-1; i++ {
select { select {
case <-stopChan: case <-stopChan:
return return
@ -172,7 +120,7 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util
} }
func templatesLoop(client *minerClient, miningAddr util.Address, func templatesLoop(client *minerClient, miningAddr util.Address,
newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) { newTemplateChan chan *model.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
longPollID := "" longPollID := ""
getBlockTemplateLongPoll := func() { getBlockTemplateLongPoll := func() {
@ -182,7 +130,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
log.Infof("Requesting template without longPollID from %s", client.Host()) log.Infof("Requesting template without longPollID from %s", client.Host())
} }
template, err := getBlockTemplate(client, miningAddr, longPollID) template, err := getBlockTemplate(client, miningAddr, longPollID)
if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) { if nativeerrors.Is(err, clientpkg.ErrResponseTimedOut) {
log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host()) log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
return return
} else if err != nil { } else if err != nil {
@ -209,11 +157,11 @@ func templatesLoop(client *minerClient, miningAddr util.Address,
} }
} }
func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*model.GetBlockTemplateResult, error) {
return client.GetBlockTemplate(miningAddr.String(), longPollID) return client.GetBlockTemplate(miningAddr.String(), longPollID)
} }
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, func solveLoop(newTemplateChan chan *model.GetBlockTemplateResult, foundBlock chan *util.Block,
mineWhenNotSynced bool, errChan chan error) { mineWhenNotSynced bool, errChan chan error) {
var stopOldTemplateSolving chan struct{} var stopOldTemplateSolving chan struct{}
@ -231,13 +179,13 @@ func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock
} }
stopOldTemplateSolving = make(chan struct{}) stopOldTemplateSolving = make(chan struct{})
block, err := parseBlock(template) block, err := clientpkg.ConvertGetBlockTemplateResultToBlock(template)
if err != nil { if err != nil {
errChan <- errors.Errorf("Error parsing block: %s", err) errChan <- errors.Errorf("Error parsing block: %s", err)
return return
} }
spawn(func() { spawn("solveBlock", func() {
solveBlock(block, stopOldTemplateSolving, foundBlock) solveBlock(block, stopOldTemplateSolving, foundBlock)
}) })
} }

View File

@ -18,6 +18,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/btcsuite/go-socks/socks" "github.com/btcsuite/go-socks/socks"
@ -68,8 +70,6 @@ var (
defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname) defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname)
) )
var activeConfig *Config
// RunServiceCommand is only set to a real function on Windows. It is used // RunServiceCommand is only set to a real function on Windows. It is used
// to parse and execute service commands specified via the -s flag. // to parse and execute service commands specified via the -s flag.
var RunServiceCommand func(string) error var RunServiceCommand func(string) error
@ -172,42 +172,8 @@ func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options)
return parser return parser
} }
//LoadAndSetActiveConfig loads the config that can be afterward be accesible through ActiveConfig() func defaultFlags() *Flags {
func LoadAndSetActiveConfig() error { return &Flags{
tcfg, _, err := loadConfig()
if err != nil {
return err
}
activeConfig = tcfg
return nil
}
// ActiveConfig is a getter to the main config
func ActiveConfig() *Config {
return activeConfig
}
// SetActiveConfig sets the active config
// to the given config.
func SetActiveConfig(cfg *Config) {
activeConfig = cfg
}
// loadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
//
// The above results in kaspad functioning properly without any config settings
// while still allowing the user to override settings with config files and
// command line options. Command line options always take precedence.
func loadConfig() (*Config, []string, error) {
// Default config.
cfgFlags := Flags{
ConfigFile: defaultConfigFile, ConfigFile: defaultConfigFile,
DebugLevel: defaultLogLevel, DebugLevel: defaultLogLevel,
TargetOutboundPeers: defaultTargetOutboundPeers, TargetOutboundPeers: defaultTargetOutboundPeers,
@ -227,6 +193,29 @@ func loadConfig() (*Config, []string, error) {
MinRelayTxFee: defaultMinRelayTxFee, MinRelayTxFee: defaultMinRelayTxFee,
AcceptanceIndex: defaultAcceptanceIndex, AcceptanceIndex: defaultAcceptanceIndex,
} }
}
// DefaultConfig returns the default kaspad configuration
func DefaultConfig() *Config {
config := &Config{Flags: defaultFlags()}
config.NetworkFlags.ActiveNetParams = &dagconfig.MainnetParams
return config
}
// LoadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
// 1) Start with a default config with sane settings
// 2) Pre-parse the command line to check for an alternative config file
// 3) Load configuration file overwriting defaults with any specified options
// 4) Parse CLI options and overwrite/add any specified options
//
// The above results in kaspad functioning properly without any config settings
// while still allowing the user to override settings with config files and
// command line options. Command line options always take precedence.
func LoadConfig() (cfg *Config, remainingArgs []string, err error) {
cfgFlags := defaultFlags()
// Service options which are only added on Windows. // Service options which are only added on Windows.
serviceOpts := serviceOptions{} serviceOpts := serviceOptions{}
@ -236,8 +225,8 @@ func loadConfig() (*Config, []string, error) {
// help message error can be ignored here since they will be caught by // help message error can be ignored here since they will be caught by
// the final parse below. // the final parse below.
preCfg := cfgFlags preCfg := cfgFlags
preParser := newConfigParser(&preCfg, &serviceOpts, flags.HelpFlag) preParser := newConfigParser(preCfg, &serviceOpts, flags.HelpFlag)
_, err := preParser.Parse() _, err = preParser.Parse()
if err != nil { if err != nil {
var flagsErr *flags.Error var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp { if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
@ -269,9 +258,9 @@ func loadConfig() (*Config, []string, error) {
// Load additional config from file. // Load additional config from file.
var configFileError error var configFileError error
parser := newConfigParser(&cfgFlags, &serviceOpts, flags.Default) parser := newConfigParser(cfgFlags, &serviceOpts, flags.Default)
activeConfig = &Config{ cfg = &Config{
Flags: &cfgFlags, Flags: cfgFlags,
} }
if !(preCfg.RegressionTest || preCfg.Simnet) || preCfg.ConfigFile != if !(preCfg.RegressionTest || preCfg.Simnet) || preCfg.ConfigFile !=
defaultConfigFile { defaultConfigFile {
@ -297,12 +286,12 @@ func loadConfig() (*Config, []string, error) {
} }
// Don't add peers from the config file when in regression test mode. // Don't add peers from the config file when in regression test mode.
if preCfg.RegressionTest && len(activeConfig.AddPeers) > 0 { if preCfg.RegressionTest && len(cfg.AddPeers) > 0 {
activeConfig.AddPeers = nil cfg.AddPeers = nil
} }
// Parse command line options again to ensure they take precedence. // Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse() remainingArgs, err = parser.Parse()
if err != nil { if err != nil {
var flagsErr *flags.Error var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp { if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
@ -332,8 +321,8 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err return nil, nil, err
} }
if !activeConfig.DisableRPC { if !cfg.DisableRPC {
if activeConfig.RPCUser == "" { if cfg.RPCUser == "" {
str := "%s: rpcuser cannot be empty" str := "%s: rpcuser cannot be empty"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
@ -341,7 +330,7 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err return nil, nil, err
} }
if activeConfig.RPCPass == "" { if cfg.RPCPass == "" {
str := "%s: rpcpass cannot be empty" str := "%s: rpcpass cannot be empty"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
@ -350,7 +339,7 @@ func loadConfig() (*Config, []string, error) {
} }
} }
err = activeConfig.ResolveNetwork(parser) err = cfg.ResolveNetwork(parser)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -359,21 +348,21 @@ func loadConfig() (*Config, []string, error) {
// according to the default of the active network. The set // according to the default of the active network. The set
// configuration value takes precedence over the default value for the // configuration value takes precedence over the default value for the
// selected network. // selected network.
relayNonStd := activeConfig.NetParams().RelayNonStdTxs relayNonStd := cfg.NetParams().RelayNonStdTxs
switch { switch {
case activeConfig.RelayNonStd && activeConfig.RejectNonStd: case cfg.RelayNonStd && cfg.RejectNonStd:
str := "%s: rejectnonstd and relaynonstd cannot be used " + str := "%s: rejectnonstd and relaynonstd cannot be used " +
"together -- choose only one" "together -- choose only one"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
case activeConfig.RejectNonStd: case cfg.RejectNonStd:
relayNonStd = false relayNonStd = false
case activeConfig.RelayNonStd: case cfg.RelayNonStd:
relayNonStd = true relayNonStd = true
} }
activeConfig.RelayNonStd = relayNonStd cfg.RelayNonStd = relayNonStd
// Append the network type to the data directory so it is "namespaced" // Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other // per network. In addition to the block database, there are other
@ -381,26 +370,26 @@ func loadConfig() (*Config, []string, error) {
// All data is specific to a network, so namespacing the data directory // All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to // means each individual piece of serialized data does not have to
// worry about changing names per network and such. // worry about changing names per network and such.
activeConfig.DataDir = cleanAndExpandPath(activeConfig.DataDir) cfg.DataDir = cleanAndExpandPath(cfg.DataDir)
activeConfig.DataDir = filepath.Join(activeConfig.DataDir, activeConfig.NetParams().Name) cfg.DataDir = filepath.Join(cfg.DataDir, cfg.NetParams().Name)
// Append the network type to the log directory so it is "namespaced" // Append the network type to the log directory so it is "namespaced"
// per network in the same fashion as the data directory. // per network in the same fashion as the data directory.
activeConfig.LogDir = cleanAndExpandPath(activeConfig.LogDir) cfg.LogDir = cleanAndExpandPath(cfg.LogDir)
activeConfig.LogDir = filepath.Join(activeConfig.LogDir, activeConfig.NetParams().Name) cfg.LogDir = filepath.Join(cfg.LogDir, cfg.NetParams().Name)
// Special show command to list supported subsystems and exit. // Special show command to list supported subsystems and exit.
if activeConfig.DebugLevel == "show" { if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems", logger.SupportedSubsystems()) fmt.Println("Supported subsystems", logger.SupportedSubsystems())
os.Exit(0) os.Exit(0)
} }
// Initialize log rotation. After log rotation has been initialized, the // Initialize log rotation. After log rotation has been initialized, the
// logger variables may be used. // logger variables may be used.
logger.InitLog(filepath.Join(activeConfig.LogDir, defaultLogFilename), filepath.Join(activeConfig.LogDir, defaultErrLogFilename)) logger.InitLog(filepath.Join(cfg.LogDir, defaultLogFilename), filepath.Join(cfg.LogDir, defaultErrLogFilename))
// Parse, validate, and set debug log level(s). // Parse, validate, and set debug log level(s).
if err := logger.ParseAndSetDebugLevels(activeConfig.DebugLevel); err != nil { if err := logger.ParseAndSetDebugLevels(cfg.DebugLevel); err != nil {
err := errors.Errorf("%s: %s", funcName, err.Error()) err := errors.Errorf("%s: %s", funcName, err.Error())
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
@ -408,8 +397,8 @@ func loadConfig() (*Config, []string, error) {
} }
// Validate profile port number // Validate profile port number
if activeConfig.Profile != "" { if cfg.Profile != "" {
profilePort, err := strconv.Atoi(activeConfig.Profile) profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 { if err != nil || profilePort < 1024 || profilePort > 65535 {
str := "%s: The profile port must be between 1024 and 65535" str := "%s: The profile port must be between 1024 and 65535"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
@ -420,20 +409,20 @@ func loadConfig() (*Config, []string, error) {
} }
// Don't allow ban durations that are too short. // Don't allow ban durations that are too short.
if activeConfig.BanDuration < time.Second { if cfg.BanDuration < time.Second {
str := "%s: The banduration option may not be less than 1s -- parsed [%s]" str := "%s: The banduration option may not be less than 1s -- parsed [%s]"
err := errors.Errorf(str, funcName, activeConfig.BanDuration) err := errors.Errorf(str, funcName, cfg.BanDuration)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
// Validate any given whitelisted IP addresses and networks. // Validate any given whitelisted IP addresses and networks.
if len(activeConfig.Whitelists) > 0 { if len(cfg.Whitelists) > 0 {
var ip net.IP var ip net.IP
activeConfig.Whitelists = make([]*net.IPNet, 0, len(activeConfig.Flags.Whitelists)) cfg.Whitelists = make([]*net.IPNet, 0, len(cfg.Flags.Whitelists))
for _, addr := range activeConfig.Flags.Whitelists { for _, addr := range cfg.Flags.Whitelists {
_, ipnet, err := net.ParseCIDR(addr) _, ipnet, err := net.ParseCIDR(addr)
if err != nil { if err != nil {
ip = net.ParseIP(addr) ip = net.ParseIP(addr)
@ -456,12 +445,12 @@ func loadConfig() (*Config, []string, error) {
Mask: net.CIDRMask(bits, bits), Mask: net.CIDRMask(bits, bits),
} }
} }
activeConfig.Whitelists = append(activeConfig.Whitelists, ipnet) cfg.Whitelists = append(cfg.Whitelists, ipnet)
} }
} }
// --addPeer and --connect do not mix. // --addPeer and --connect do not mix.
if len(activeConfig.AddPeers) > 0 && len(activeConfig.ConnectPeers) > 0 { if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 {
str := "%s: the --addpeer and --connect options can not be " + str := "%s: the --addpeer and --connect options can not be " +
"mixed" "mixed"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
@ -471,27 +460,28 @@ func loadConfig() (*Config, []string, error) {
} }
// --proxy or --connect without --listen disables listening. // --proxy or --connect without --listen disables listening.
if (activeConfig.Proxy != "" || len(activeConfig.ConnectPeers) > 0) && if (cfg.Proxy != "" || len(cfg.ConnectPeers) > 0) &&
len(activeConfig.Listeners) == 0 { len(cfg.Listeners) == 0 {
activeConfig.DisableListen = true cfg.DisableListen = true
} }
// Connect means no DNS seeding. // ConnectPeers means no DNS seeding and no outbound peers
if len(activeConfig.ConnectPeers) > 0 { if len(cfg.ConnectPeers) > 0 {
activeConfig.DisableDNSSeed = true cfg.DisableDNSSeed = true
cfg.TargetOutboundPeers = 0
} }
// Add the default listener if none were specified. The default // Add the default listener if none were specified. The default
// listener is all addresses on the listen port for the network // listener is all addresses on the listen port for the network
// we are to connect to. // we are to connect to.
if len(activeConfig.Listeners) == 0 { if len(cfg.Listeners) == 0 {
activeConfig.Listeners = []string{ cfg.Listeners = []string{
net.JoinHostPort("", activeConfig.NetParams().DefaultPort), net.JoinHostPort("", cfg.NetParams().DefaultPort),
} }
} }
// Check to make sure limited and admin users don't have the same username // Check to make sure limited and admin users don't have the same username
if activeConfig.RPCUser == activeConfig.RPCLimitUser && activeConfig.RPCUser != "" { if cfg.RPCUser == cfg.RPCLimitUser && cfg.RPCUser != "" {
str := "%s: --rpcuser and --rpclimituser must not specify the " + str := "%s: --rpcuser and --rpclimituser must not specify the " +
"same username" "same username"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
@ -501,7 +491,7 @@ func loadConfig() (*Config, []string, error) {
} }
// Check to make sure limited and admin users don't have the same password // Check to make sure limited and admin users don't have the same password
if activeConfig.RPCPass == activeConfig.RPCLimitPass && activeConfig.RPCPass != "" { if cfg.RPCPass == cfg.RPCLimitPass && cfg.RPCPass != "" {
str := "%s: --rpcpass and --rpclimitpass must not specify the " + str := "%s: --rpcpass and --rpclimitpass must not specify the " +
"same password" "same password"
err := errors.Errorf(str, funcName) err := errors.Errorf(str, funcName)
@ -511,39 +501,39 @@ func loadConfig() (*Config, []string, error) {
} }
// The RPC server is disabled if no username or password is provided. // The RPC server is disabled if no username or password is provided.
if (activeConfig.RPCUser == "" || activeConfig.RPCPass == "") && if (cfg.RPCUser == "" || cfg.RPCPass == "") &&
(activeConfig.RPCLimitUser == "" || activeConfig.RPCLimitPass == "") { (cfg.RPCLimitUser == "" || cfg.RPCLimitPass == "") {
activeConfig.DisableRPC = true cfg.DisableRPC = true
} }
if activeConfig.DisableRPC { if cfg.DisableRPC {
log.Infof("RPC service is disabled") log.Infof("RPC service is disabled")
} }
// Default RPC to listen on localhost only. // Default RPC to listen on localhost only.
if !activeConfig.DisableRPC && len(activeConfig.RPCListeners) == 0 { if !cfg.DisableRPC && len(cfg.RPCListeners) == 0 {
addrs, err := net.LookupHost("localhost") addrs, err := net.LookupHost("localhost")
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
activeConfig.RPCListeners = make([]string, 0, len(addrs)) cfg.RPCListeners = make([]string, 0, len(addrs))
for _, addr := range addrs { for _, addr := range addrs {
addr = net.JoinHostPort(addr, activeConfig.NetParams().RPCPort) addr = net.JoinHostPort(addr, cfg.NetParams().RPCPort)
activeConfig.RPCListeners = append(activeConfig.RPCListeners, addr) cfg.RPCListeners = append(cfg.RPCListeners, addr)
} }
} }
if activeConfig.RPCMaxConcurrentReqs < 0 { if cfg.RPCMaxConcurrentReqs < 0 {
str := "%s: The rpcmaxwebsocketconcurrentrequests option may " + str := "%s: The rpcmaxwebsocketconcurrentrequests option may " +
"not be less than 0 -- parsed [%d]" "not be less than 0 -- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.RPCMaxConcurrentReqs) err := errors.Errorf(str, funcName, cfg.RPCMaxConcurrentReqs)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
// Validate the the minrelaytxfee. // Validate the the minrelaytxfee.
activeConfig.MinRelayTxFee, err = util.NewAmount(activeConfig.Flags.MinRelayTxFee) cfg.MinRelayTxFee, err = util.NewAmount(cfg.Flags.MinRelayTxFee)
if err != nil { if err != nil {
str := "%s: invalid minrelaytxfee: %s" str := "%s: invalid minrelaytxfee: %s"
err := errors.Errorf(str, funcName, err) err := errors.Errorf(str, funcName, err)
@ -553,39 +543,39 @@ func loadConfig() (*Config, []string, error) {
} }
// Disallow 0 and negative min tx fees. // Disallow 0 and negative min tx fees.
if activeConfig.MinRelayTxFee == 0 { if cfg.MinRelayTxFee == 0 {
str := "%s: The minrelaytxfee option must be greater than 0 -- parsed [%d]" str := "%s: The minrelaytxfee option must be greater than 0 -- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.MinRelayTxFee) err := errors.Errorf(str, funcName, cfg.MinRelayTxFee)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
// Limit the max block mass to a sane value. // Limit the max block mass to a sane value.
if activeConfig.BlockMaxMass < blockMaxMassMin || activeConfig.BlockMaxMass > if cfg.BlockMaxMass < blockMaxMassMin || cfg.BlockMaxMass >
blockMaxMassMax { blockMaxMassMax {
str := "%s: The blockmaxmass option must be in between %d " + str := "%s: The blockmaxmass option must be in between %d " +
"and %d -- parsed [%d]" "and %d -- parsed [%d]"
err := errors.Errorf(str, funcName, blockMaxMassMin, err := errors.Errorf(str, funcName, blockMaxMassMin,
blockMaxMassMax, activeConfig.BlockMaxMass) blockMaxMassMax, cfg.BlockMaxMass)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
// Limit the max orphan count to a sane value. // Limit the max orphan count to a sane value.
if activeConfig.MaxOrphanTxs < 0 { if cfg.MaxOrphanTxs < 0 {
str := "%s: The maxorphantx option may not be less than 0 " + str := "%s: The maxorphantx option may not be less than 0 " +
"-- parsed [%d]" "-- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.MaxOrphanTxs) err := errors.Errorf(str, funcName, cfg.MaxOrphanTxs)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
// Look for illegal characters in the user agent comments. // Look for illegal characters in the user agent comments.
for _, uaComment := range activeConfig.UserAgentComments { for _, uaComment := range cfg.UserAgentComments {
if strings.ContainsAny(uaComment, "/:()") { if strings.ContainsAny(uaComment, "/:()") {
err := errors.Errorf("%s: The following characters must not "+ err := errors.Errorf("%s: The following characters must not "+
"appear in user agent comments: '/', ':', '(', ')'", "appear in user agent comments: '/', ':', '(', ')'",
@ -597,7 +587,7 @@ func loadConfig() (*Config, []string, error) {
} }
// --acceptanceindex and --dropacceptanceindex do not mix. // --acceptanceindex and --dropacceptanceindex do not mix.
if activeConfig.AcceptanceIndex && activeConfig.DropAcceptanceIndex { if cfg.AcceptanceIndex && cfg.DropAcceptanceIndex {
err := errors.Errorf("%s: the --acceptanceindex and --dropacceptanceindex "+ err := errors.Errorf("%s: the --acceptanceindex and --dropacceptanceindex "+
"options may not be activated at the same time", "options may not be activated at the same time",
funcName) funcName)
@ -608,29 +598,29 @@ func loadConfig() (*Config, []string, error) {
// Add default port to all listener addresses if needed and remove // Add default port to all listener addresses if needed and remove
// duplicate addresses. // duplicate addresses.
activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners, cfg.Listeners, err = network.NormalizeAddresses(cfg.Listeners,
activeConfig.NetParams().DefaultPort) cfg.NetParams().DefaultPort)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
// Add default port to all rpc listener addresses if needed and remove // Add default port to all rpc listener addresses if needed and remove
// duplicate addresses. // duplicate addresses.
activeConfig.RPCListeners, err = network.NormalizeAddresses(activeConfig.RPCListeners, cfg.RPCListeners, err = network.NormalizeAddresses(cfg.RPCListeners,
activeConfig.NetParams().RPCPort) cfg.NetParams().RPCPort)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
// Only allow TLS to be disabled if the RPC is bound to localhost // Only allow TLS to be disabled if the RPC is bound to localhost
// addresses. // addresses.
if !activeConfig.DisableRPC && activeConfig.DisableTLS { if !cfg.DisableRPC && cfg.DisableTLS {
allowedTLSListeners := map[string]struct{}{ allowedTLSListeners := map[string]struct{}{
"localhost": {}, "localhost": {},
"127.0.0.1": {}, "127.0.0.1": {},
"::1": {}, "::1": {},
} }
for _, addr := range activeConfig.RPCListeners { for _, addr := range cfg.RPCListeners {
host, _, err := net.SplitHostPort(addr) host, _, err := net.SplitHostPort(addr)
if err != nil { if err != nil {
str := "%s: RPC listen interface '%s' is " + str := "%s: RPC listen interface '%s' is " +
@ -652,16 +642,25 @@ func loadConfig() (*Config, []string, error) {
} }
} }
// Disallow --addpeer and --connect used together
if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 {
str := "%s: --addpeer and --connect can not be used together"
err := errors.Errorf(str, funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Add default port to all added peer addresses if needed and remove // Add default port to all added peer addresses if needed and remove
// duplicate addresses. // duplicate addresses.
activeConfig.AddPeers, err = network.NormalizeAddresses(activeConfig.AddPeers, cfg.AddPeers, err = network.NormalizeAddresses(cfg.AddPeers,
activeConfig.NetParams().DefaultPort) cfg.NetParams().DefaultPort)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
activeConfig.ConnectPeers, err = network.NormalizeAddresses(activeConfig.ConnectPeers, cfg.ConnectPeers, err = network.NormalizeAddresses(cfg.ConnectPeers,
activeConfig.NetParams().DefaultPort) cfg.NetParams().DefaultPort)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -671,24 +670,24 @@ func loadConfig() (*Config, []string, error) {
// net.DialTimeout function as well as the system DNS resolver. When a // net.DialTimeout function as well as the system DNS resolver. When a
// proxy is specified, the dial function is set to the proxy specific // proxy is specified, the dial function is set to the proxy specific
// dial function. // dial function.
activeConfig.Dial = net.DialTimeout cfg.Dial = net.DialTimeout
activeConfig.Lookup = net.LookupIP cfg.Lookup = net.LookupIP
if activeConfig.Proxy != "" { if cfg.Proxy != "" {
_, _, err := net.SplitHostPort(activeConfig.Proxy) _, _, err := net.SplitHostPort(cfg.Proxy)
if err != nil { if err != nil {
str := "%s: Proxy address '%s' is invalid: %s" str := "%s: Proxy address '%s' is invalid: %s"
err := errors.Errorf(str, funcName, activeConfig.Proxy, err) err := errors.Errorf(str, funcName, cfg.Proxy, err)
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage) fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err return nil, nil, err
} }
proxy := &socks.Proxy{ proxy := &socks.Proxy{
Addr: activeConfig.Proxy, Addr: cfg.Proxy,
Username: activeConfig.ProxyUser, Username: cfg.ProxyUser,
Password: activeConfig.ProxyPass, Password: cfg.ProxyPass,
} }
activeConfig.Dial = proxy.DialTimeout cfg.Dial = proxy.DialTimeout
} }
// Warn about missing config file only after all other configuration is // Warn about missing config file only after all other configuration is
@ -698,7 +697,7 @@ func loadConfig() (*Config, []string, error) {
log.Warnf("%s", configFileError) log.Warnf("%s", configFileError)
} }
return activeConfig, remainingArgs, nil return cfg, remainingArgs, nil
} }
// createDefaultConfig copies the file sample-kaspad.conf to the given destination path, // createDefaultConfig copies the file sample-kaspad.conf to the given destination path,

View File

@ -0,0 +1,113 @@
package connmanager
import (
"time"
)
const (
minRetryDuration = 30 * time.Second
maxRetryDuration = 10 * time.Minute
)
func nextRetryDuration(previousDuration time.Duration) time.Duration {
if previousDuration < minRetryDuration {
return minRetryDuration
}
if previousDuration*2 > maxRetryDuration {
return maxRetryDuration
}
return previousDuration * 2
}
// checkRequestedConnections checks that all activeRequested are still active, and initiates connections
// for pendingRequested.
// While doing so, it filters out of connSet all connections that were initiated as a connectionRequest
func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
	c.connectionRequestsLock.Lock()
	defer c.connectionRequestsLock.Unlock()

	now := time.Now()

	for address, connReq := range c.activeRequested {
		connection, ok := connSet.get(address)
		if !ok { // a requested connection was disconnected
			delete(c.activeRequested, address)

			if connReq.isPermanent { // if is one-try - ignore. If permanent - add to pending list to retry
				connReq.nextAttempt = now
				connReq.retryDuration = 0
				c.pendingRequested[address] = connReq
			}
			continue
		}

		connSet.remove(connection)
	}

	for address, connReq := range c.pendingRequested {
		if connReq.nextAttempt.After(now) { // ignore connection requests which are still waiting for retry
			continue
		}

		connection, ok := connSet.get(address)
		// The pending connection request has already connected - move it to active
		// This can happen in rare cases such as when the other side has connected to our node
		// while it has been pending on our side.
		if ok {
			delete(c.pendingRequested, address)
			// BUG FIX: the request must land in activeRequested, not be
			// re-inserted into pendingRequested - otherwise it would be
			// redialed forever even though it is already connected.
			c.activeRequested[address] = connReq

			connSet.remove(connection)
			continue
		}

		// try to initiate connection
		err := c.initiateConnection(connReq.address)
		if err != nil {
			log.Infof("Couldn't connect to %s: %s", address, err)

			// if connection request is one try - remove from pending and ignore failure
			if !connReq.isPermanent {
				delete(c.pendingRequested, address)
				continue
			}

			// if connection request is permanent - keep in pending, and increase retry time
			connReq.retryDuration = nextRetryDuration(connReq.retryDuration)
			connReq.nextAttempt = now.Add(connReq.retryDuration)
			log.Debugf("Retrying permanent connection to %s in %s", address, connReq.retryDuration)
			continue
		}

		// if connected successfully - move from pending to active
		delete(c.pendingRequested, address)
		c.activeRequested[address] = connReq
	}
}
// AddConnectionRequest adds the given address to list of pending connection requests
func (c *ConnectionManager) AddConnectionRequest(address string, isPermanent bool) {
	// spawn goroutine so that caller doesn't wait in case connectionManager is in the midst of handling
	// connection requests
	spawn("ConnectionManager.AddConnectionRequest", func() {
		added := func() bool {
			c.connectionRequestsLock.Lock()
			defer c.connectionRequestsLock.Unlock()

			// Already connected through a previous request - nothing to do.
			if _, ok := c.activeRequested[address]; ok {
				return false
			}

			c.pendingRequested[address] = &connectionRequest{
				address:     address,
				isPermanent: isPermanent,
			}
			return true
		}()

		// DEADLOCK FIX: c.run() sends on the unbuffered resetLoopChan. If it is
		// called while connectionRequestsLock is held, the connections loop may
		// be blocked acquiring that same lock (in checkRequestedConnections) and
		// would never read from the channel. Signal only after the lock is released.
		if added {
			c.run()
		}
	})
}
// RemoveConnection disconnects the connection for the given address
// and removes it entirely from the connection manager.
//
// NOTE(review): not yet implemented for the libp2p-based stack - any
// call currently panics.
func (c *ConnectionManager) RemoveConnection(address string) {
	// TODO(libp2p): unimplemented
	panic("unimplemented")
}

View File

@ -0,0 +1,30 @@
package connmanager
import (
"github.com/kaspanet/kaspad/netadapter"
)
// connectionSet maps an address string to the net connection currently
// associated with that address, allowing O(1) membership checks.
type connectionSet map[string]*netadapter.NetConnection

// add registers connection under its own address.
func (cs connectionSet) add(connection *netadapter.NetConnection) {
	address := connection.Address()
	cs[address] = connection
}

// remove drops whatever is registered under connection's address.
func (cs connectionSet) remove(connection *netadapter.NetConnection) {
	address := connection.Address()
	delete(cs, address)
}

// get looks up the connection registered under address, reporting whether
// such a connection exists.
func (cs connectionSet) get(address string) (*netadapter.NetConnection, bool) {
	connection, found := cs[address]
	return connection, found
}

// convertToSet builds a connectionSet out of a slice of connections,
// keyed by each connection's address.
func convertToSet(connections []*netadapter.NetConnection) connectionSet {
	connSet := make(connectionSet, len(connections))
	for _, connection := range connections {
		connSet.add(connection)
	}
	return connSet
}

145
connmanager/connmanager.go Normal file
View File

@ -0,0 +1,145 @@
package connmanager
import (
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/addressmanager"
"github.com/kaspanet/kaspad/netadapter"
"github.com/kaspanet/kaspad/config"
)
// connectionRequest represents a user request (either through CLI or RPC) to connect to a certain node
type connectionRequest struct {
	address string
	// isPermanent: permanent requests are retried with growing backoff after
	// a disconnect; one-try requests are dropped on failure.
	isPermanent bool
	// nextAttempt is the earliest time the next connection attempt may be made.
	nextAttempt time.Time
	// retryDuration is the current backoff delay between attempts
	// (advanced via nextRetryDuration).
	retryDuration time.Duration
}
// ConnectionManager monitors that the current active connections satisfy the requirements of
// outgoing, requested and incoming connections
type ConnectionManager struct {
	cfg            *config.Config
	netAdapter     *netadapter.NetAdapter
	addressManager *addressmanager.AddressManager

	// Requested connections, keyed by address. Active ones are currently
	// connected; pending ones are waiting to be (re)attempted.
	activeRequested  map[string]*connectionRequest
	pendingRequested map[string]*connectionRequest

	// Outgoing connections chosen from the address manager; the connections
	// loop tops these up until targetOutgoing is reached.
	activeOutgoing map[string]struct{}
	targetOutgoing int

	// Incoming connections; any surplus above maxIncoming is disconnected.
	activeIncoming map[string]struct{}
	maxIncoming    int

	// stop is set atomically to 1 to terminate the connections loop.
	stop uint32
	// connectionRequestsLock guards activeRequested and pendingRequested.
	connectionRequestsLock sync.Mutex

	// resetLoopChan signals the loop to start a new iteration immediately.
	resetLoopChan chan struct{}
	// loopTicker paces regular loop iterations.
	loopTicker *time.Ticker
}
// New instantiates a new instance of a ConnectionManager
func New(cfg *config.Config, netAdapter *netadapter.NetAdapter, addressManager *addressmanager.AddressManager) (*ConnectionManager, error) {
	c := &ConnectionManager{
		cfg:              cfg,
		netAdapter:       netAdapter,
		addressManager:   addressManager,
		activeRequested:  map[string]*connectionRequest{},
		pendingRequested: map[string]*connectionRequest{},
		activeOutgoing:   map[string]struct{}{},
		activeIncoming:   map[string]struct{}{},
		resetLoopChan:    make(chan struct{}),
		loopTicker:       time.NewTicker(connectionsLoopInterval),
		maxIncoming:      cfg.MaxInboundPeers,
		targetOutgoing:   cfg.TargetOutboundPeers,
	}

	// --connect overrides --addpeer: when connect-peers are configured they
	// are the only addresses seeded as permanent connection requests.
	connectPeers := cfg.AddPeers
	if len(cfg.ConnectPeers) > 0 {
		connectPeers = cfg.ConnectPeers
	}
	for _, address := range connectPeers {
		c.pendingRequested[address] = &connectionRequest{
			address:     address,
			isPermanent: true,
		}
	}

	return c, nil
}
// Start begins the operation of the ConnectionManager by launching the
// background connections loop goroutine.
func (c *ConnectionManager) Start() {
	spawn("ConnectionManager.connectionsLoop", c.connectionsLoop)
}
// Stop halts the operation of the ConnectionManager
func (c *ConnectionManager) Stop() {
	// Flag the connections loop to exit on its next iteration.
	atomic.StoreUint32(&c.stop, 1)

	// Tear down every live connection so the node detaches from the network.
	connections := c.netAdapter.Connections()
	for _, connection := range connections {
		connection.Disconnect()
	}

	c.loopTicker.Stop()
}
// run signals the connections loop to abandon its current ticker wait and
// start a new iteration immediately. Blocks until the loop receives the signal.
func (c *ConnectionManager) run() {
	c.resetLoopChan <- struct{}{}
}

// initiateConnection dials the given address through the net adapter,
// returning the dial error, if any.
func (c *ConnectionManager) initiateConnection(address string) error {
	log.Infof("Connecting to %s", address)
	return c.netAdapter.Connect(address)
}
// connectionsLoopInterval is how often the connections loop re-evaluates the
// connection state when nothing triggers an earlier iteration.
const connectionsLoopInterval = 30 * time.Second

// connectionsLoop repeatedly classifies the adapter's live connections into
// requested, outgoing and incoming, reconciling each category against its
// target, until Stop is called.
func (c *ConnectionManager) connectionsLoop() {
	for atomic.LoadUint32(&c.stop) == 0 {
		// We convert the connections list to a set, so that connections can be found quickly
		// Then we go over the set, classifying connection by category: requested, outgoing or incoming.
		// Every step removes all matching connections so that once we get to checkIncomingConnections -
		// the only connections left are the incoming ones
		connSet := convertToSet(c.netAdapter.Connections())

		c.checkRequestedConnections(connSet)
		c.checkOutgoingConnections(connSet)
		c.checkIncomingConnections(connSet)

		c.waitTillNextIteration()
	}
}
// ConnectionCount returns the count of the connected connections
// (thin delegation to the net adapter).
func (c *ConnectionManager) ConnectionCount() int {
	return c.netAdapter.ConnectionCount()
}

// Ban marks the given netConnection as banned
// by registering its network address with the address manager.
func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
	return c.addressManager.Ban(netConnection.NetAddress())
}

// IsBanned returns whether the given netConnection is banned
// according to the address manager.
func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) {
	return c.addressManager.IsBanned(netConnection.NetAddress())
}
// waitTillNextIteration blocks until either the loop ticker fires or an
// immediate iteration is requested through resetLoopChan.
func (c *ConnectionManager) waitTillNextIteration() {
	select {
	case <-c.loopTicker.C:
	case <-c.resetLoopChan:
		// An immediate iteration was requested - restart the ticker so the
		// next scheduled iteration is a full interval away.
		c.loopTicker.Stop()
		c.loopTicker = time.NewTicker(connectionsLoopInterval)
	}
}

View File

@ -0,0 +1,20 @@
package connmanager
// checkIncomingConnections makes sure there's no more than maxIncoming incoming connections
// if there are - it randomly disconnects enough to go below that number
func (c *ConnectionManager) checkIncomingConnections(incomingConnectionSet connectionSet) {
	excess := len(incomingConnectionSet) - c.maxIncoming
	if excess <= 0 {
		return
	}

	// Map iteration order is random in Go, so ranging over the set picks the
	// victims at random; disconnect until we are back at maxIncoming.
	for _, connection := range incomingConnectionSet {
		connection.Disconnect()
		excess--
		if excess == 0 {
			break
		}
	}
}

9
connmanager/log.go Normal file
View File

@ -0,0 +1,9 @@
package connmanager
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
// log is the connmanager subsystem logger.
var log, _ = logger.Get(logger.SubsystemTags.CMGR)

// spawn launches goroutines whose panics are logged through log before
// being re-raised.
var spawn = panics.GoroutineWrapperFunc(log)

View File

@ -0,0 +1,61 @@
package connmanager
// checkOutgoingConnections goes over all activeOutgoing and makes sure they are still active.
// Then it opens connections so that we have targetOutgoing active connections
func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
for address := range c.activeOutgoing {
connection, ok := connSet.get(address)
if ok { // connection is still connected
connSet.remove(connection)
continue
}
// if connection is dead - remove from list of active ones
delete(c.activeOutgoing, address)
}
liveConnections := len(c.activeOutgoing)
if c.targetOutgoing == liveConnections {
return
}
log.Debugf("Have got %d outgoing connections out of target %d, adding %d more",
liveConnections, c.targetOutgoing, c.targetOutgoing-liveConnections)
connectionsNeededCount := c.targetOutgoing - len(c.activeOutgoing)
connectionAttempts := connectionsNeededCount * 2
for i := 0; i < connectionAttempts; i++ {
// Return in case we've already reached or surpassed our target
if len(c.activeOutgoing) >= c.targetOutgoing {
return
}
address := c.addressManager.GetAddress()
if address == nil {
log.Warnf("No more addresses available")
return
}
netAddress := address.NetAddress()
tcpAddress := netAddress.TCPAddress()
addressString := tcpAddress.String()
isBanned, err := c.addressManager.IsBanned(netAddress)
if err != nil {
log.Infof("Couldn't resolve whether %s is banned: %s", addressString, err)
continue
}
if isBanned {
continue
}
c.addressManager.Attempt(netAddress)
err = c.initiateConnection(addressString)
if err != nil {
log.Infof("Couldn't connect to %s: %s", addressString, err)
continue
}
c.addressManager.Connected(netAddress)
c.activeOutgoing[addressString] = struct{}{}
}
}

View File

@ -1,27 +0,0 @@
connmgr
=======
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/connmgr)
Package connmgr implements a generic Kaspa network connection manager.
## Overview
Connection Manager handles all the general connection concerns such as
maintaining a set number of outbound connections, sourcing peers, banning,
limiting max connections, etc.
The package provides a generic connection manager which is able to accept
connection requests from a source or a set of given addresses, dial them and
notify the caller on connections. The main intended use is to initialize a pool
of active connections and maintain them to remain connected to the P2P network.
In addition the connection manager provides the following utilities:
- Notifications on connections or disconnections
- Handle failures and retry new addresses from the source
- Connect only to specified addresses
- Permanent connections with increasing backoff retry timers
- Disconnect or Remove an established connection

View File

@ -1,779 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
nativeerrors "errors"
"fmt"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/wire"
"net"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
)
// maxFailedAttempts is the maximum number of successive failed connection
// attempts after which network failure is assumed and new connections will
// be delayed by the configured retry duration.
const maxFailedAttempts = 25
var (
// maxRetryDuration is the max duration of time retrying of a persistent
// connection is allowed to grow to. This is necessary since the retry
// logic uses a backoff mechanism which increases the interval base times
// the number of retries that have been done.
maxRetryDuration = time.Minute * 5
// defaultRetryDuration is the default duration of time for retrying
// persistent connections.
defaultRetryDuration = time.Second * 5
)
var (
//ErrDialNil is used to indicate that Dial cannot be nil in the configuration.
ErrDialNil = errors.New("Config: Dial cannot be nil")
// ErrMaxOutboundPeers is an error that is thrown when the max amount of peers had
// been reached.
ErrMaxOutboundPeers = errors.New("max outbound peers reached")
// ErrAlreadyConnected is an error that is thrown if the peer is already
// connected.
ErrAlreadyConnected = errors.New("peer already connected")
// ErrAlreadyPermanent is an error that is thrown if the peer is already
// connected as a permanent peer.
ErrAlreadyPermanent = errors.New("peer exists as a permanent peer")
// ErrPeerNotFound is an error that is thrown if the peer was not found.
ErrPeerNotFound = errors.New("peer not found")
//ErrAddressManagerNil is used to indicate that Address Manager cannot be nil in the configuration.
ErrAddressManagerNil = errors.New("Config: Address manager cannot be nil")
)
// ConnState represents the state of the requested connection.
type ConnState uint8
// ConnState can be either pending, established, disconnected or failed. When
// a new connection is requested, it is attempted and categorized as
// established or failed depending on the connection result. An established
// connection which was disconnected is categorized as disconnected.
const (
ConnPending ConnState = iota
ConnFailing
ConnCanceled
ConnEstablished
ConnDisconnected
)
// ConnReq is the connection request to a network address. If permanent, the
// connection will be retried on disconnection.
type ConnReq struct {
// The following variables must only be used atomically.
id uint64
Addr *net.TCPAddr
Permanent bool
conn net.Conn
state ConnState
stateMtx sync.RWMutex
retryCount uint32
}
// updateState updates the state of the connection request.
func (c *ConnReq) updateState(state ConnState) {
c.stateMtx.Lock()
defer c.stateMtx.Unlock()
c.state = state
}
// ID returns a unique identifier for the connection request.
func (c *ConnReq) ID() uint64 {
return atomic.LoadUint64(&c.id)
}
// State is the connection state of the requested connection.
func (c *ConnReq) State() ConnState {
c.stateMtx.RLock()
defer c.stateMtx.RUnlock()
state := c.state
return state
}
// String returns a human-readable string for the connection request.
func (c *ConnReq) String() string {
if c.Addr == nil || c.Addr.String() == "" {
return fmt.Sprintf("reqid %d", atomic.LoadUint64(&c.id))
}
return fmt.Sprintf("%s (reqid %d)", c.Addr, atomic.LoadUint64(&c.id))
}
// Config holds the configuration options related to the connection manager.
type Config struct {
// Listeners defines a slice of listeners for which the connection
// manager will take ownership of and accept connections. When a
// connection is accepted, the OnAccept handler will be invoked with the
// connection. Since the connection manager takes ownership of these
// listeners, they will be closed when the connection manager is
// stopped.
//
// This field will not have any effect if the OnAccept field is not
// also specified. It may be nil if the caller does not wish to listen
// for incoming connections.
Listeners []net.Listener
// OnAccept is a callback that is fired when an inbound connection is
// accepted. It is the caller's responsibility to close the connection.
// Failure to close the connection will result in the connection manager
// believing the connection is still active and thus have undesirable
// side effects such as still counting toward maximum connection limits.
//
// This field will not have any effect if the Listeners field is not
// also specified since there couldn't possibly be any accepted
// connections in that case.
OnAccept func(net.Conn)
// TargetOutbound is the number of outbound network connections to
// maintain. Defaults to 8.
TargetOutbound uint32
// RetryDuration is the duration to wait before retrying connection
// requests. Defaults to 5s.
RetryDuration time.Duration
// OnConnection is a callback that is fired when a new outbound
// connection is established.
OnConnection func(*ConnReq, net.Conn)
// OnConnectionFailed is a callback that is fired when a new outbound
// connection has failed to be established.
OnConnectionFailed func(*ConnReq)
// OnDisconnection is a callback that is fired when an outbound
// connection is disconnected.
OnDisconnection func(*ConnReq)
AddrManager *addrmgr.AddrManager
// Dial connects to the address on the named network. It cannot be nil.
Dial func(net.Addr) (net.Conn, error)
}
// registerPending is used to register a pending connection attempt. By
// registering pending connection attempts we allow callers to cancel pending
// connection attempts before their successful or in the case they're not
// longer wanted.
type registerPending struct {
c *ConnReq
done chan struct{}
}
// handleConnected is used to queue a successful connection.
type handleConnected struct {
c *ConnReq
conn net.Conn
}
// handleDisconnected is used to remove a connection.
type handleDisconnected struct {
id uint64
retry bool
}
// handleFailed is used to remove a pending connection.
type handleFailed struct {
c *ConnReq
err error
}
// ConnManager provides a manager to handle network connections.
type ConnManager struct {
// The following variables must only be used atomically.
connReqCount uint64
start int32
stop int32
addressMtx sync.Mutex
usedOutboundGroups map[string]int64
usedAddresses map[string]struct{}
cfg Config
wg sync.WaitGroup
failedAttempts uint64
requests chan interface{}
quit chan struct{}
}
// handleFailedConn handles a connection failed due to a disconnect or any
// other failure. If permanent, it retries the connection after the configured
// retry duration. Otherwise, if required, it makes a new connection request.
// After maxFailedConnectionAttempts new connections will be retried after the
// configured retry duration.
func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
// Don't write throttled logs more than once every throttledConnFailedLogInterval
shouldWriteLog := shouldWriteConnFailedLog(err)
if shouldWriteLog {
// If we are to write a log, set its lastLogTime to now
setConnFailedLastLogTime(err, time.Now())
}
if c.Permanent {
c.retryCount++
d := time.Duration(c.retryCount) * cm.cfg.RetryDuration
if d > maxRetryDuration {
d = maxRetryDuration
}
if shouldWriteLog {
log.Debugf("Retrying further connections to %s every %s", c, d)
}
spawnAfter(d, func() {
cm.connect(c)
})
} else {
if c.Addr != nil {
cm.releaseAddress(c.Addr)
}
cm.failedAttempts++
if cm.failedAttempts >= maxFailedAttempts {
if shouldWriteLog {
log.Debugf("Max failed connection attempts reached: [%d] "+
"-- retrying further connections every %s", maxFailedAttempts,
cm.cfg.RetryDuration)
}
spawnAfter(cm.cfg.RetryDuration, cm.NewConnReq)
} else {
spawn(cm.NewConnReq)
}
}
}
func (cm *ConnManager) releaseAddress(addr *net.TCPAddr) {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
groupKey := usedOutboundGroupsKey(addr)
cm.usedOutboundGroups[groupKey]--
if cm.usedOutboundGroups[groupKey] < 0 {
panic(fmt.Errorf("cm.usedOutboundGroups[%s] has a negative value of %d. This should never happen", groupKey, cm.usedOutboundGroups[groupKey]))
}
delete(cm.usedAddresses, usedAddressesKey(addr))
}
func (cm *ConnManager) markAddressAsUsed(addr *net.TCPAddr) {
cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]++
cm.usedAddresses[usedAddressesKey(addr)] = struct{}{}
}
func (cm *ConnManager) isOutboundGroupUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]
return ok
}
func (cm *ConnManager) isAddressUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedAddresses[usedAddressesKey(addr)]
return ok
}
func usedOutboundGroupsKey(addr *net.TCPAddr) string {
// A fake service flag is used since it doesn't affect the group key.
na := wire.NewNetAddress(addr, wire.SFNodeNetwork)
return addrmgr.GroupKey(na)
}
func usedAddressesKey(addr *net.TCPAddr) string {
return addr.String()
}
// throttledError defines an error type whose logs get throttled. This is to
// prevent flooding the logs with identical errors.
type throttledError error
var (
// throttledConnFailedLogInterval is the minimum duration of time between
// the logs defined in throttledConnFailedLogs.
throttledConnFailedLogInterval = time.Minute * 10
// throttledConnFailedLogs are logs that get written at most every
// throttledConnFailedLogInterval. Each entry in this map defines a type
// of error that we want to throttle. The value of each entry is the last
// time that type of log had been written.
throttledConnFailedLogs = map[throttledError]time.Time{
ErrNoAddress: {},
}
// ErrNoAddress is an error that is thrown when there aren't any
// valid connection addresses.
ErrNoAddress throttledError = errors.New("no valid connect address")
)
// shouldWriteConnFailedLog resolves whether to write logs related to connection
// failures. Errors that had not been previously registered in throttledConnFailedLogs
// and non-error (nil values) must always be logged.
func shouldWriteConnFailedLog(err error) bool {
if err == nil {
return true
}
lastLogTime, ok := throttledConnFailedLogs[err]
return !ok || lastLogTime.Add(throttledConnFailedLogInterval).Before(time.Now())
}
// setConnFailedLastLogTime sets the last log time of the specified error
func setConnFailedLastLogTime(err error, lastLogTime time.Time) {
var throttledErr throttledError
nativeerrors.As(err, &throttledErr)
throttledConnFailedLogs[err] = lastLogTime
}
// connHandler handles all connection related requests. It must be run as a
// goroutine.
//
// The connection handler makes sure that we maintain a pool of active outbound
// connections so that we remain connected to the network. Connection requests
// are processed and mapped by their assigned ids.
func (cm *ConnManager) connHandler() {
var (
// pending holds all registered conn requests that have yet to
// succeed.
pending = make(map[uint64]*ConnReq)
// conns represents the set of all actively connected peers.
conns = make(map[uint64]*ConnReq, cm.cfg.TargetOutbound)
)
out:
for {
select {
case req := <-cm.requests:
switch msg := req.(type) {
case registerPending:
connReq := msg.c
connReq.updateState(ConnPending)
pending[msg.c.id] = connReq
close(msg.done)
case handleConnected:
connReq := msg.c
if _, ok := pending[connReq.id]; !ok {
if msg.conn != nil {
msg.conn.Close()
}
log.Debugf("Ignoring connection for "+
"canceled connreq=%s", connReq)
continue
}
connReq.updateState(ConnEstablished)
connReq.conn = msg.conn
conns[connReq.id] = connReq
log.Debugf("Connected to %s", connReq)
connReq.retryCount = 0
delete(pending, connReq.id)
if cm.cfg.OnConnection != nil {
cm.cfg.OnConnection(connReq, msg.conn)
}
case handleDisconnected:
connReq, ok := conns[msg.id]
if !ok {
connReq, ok = pending[msg.id]
if !ok {
log.Errorf("Unknown connid=%d",
msg.id)
continue
}
// Pending connection was found, remove
// it from pending map if we should
// ignore a later, successful
// connection.
connReq.updateState(ConnCanceled)
log.Debugf("Canceling: %s", connReq)
delete(pending, msg.id)
continue
}
// An existing connection was located, mark as
// disconnected and execute disconnection
// callback.
log.Debugf("Disconnected from %s", connReq)
delete(conns, msg.id)
if connReq.conn != nil {
connReq.conn.Close()
}
if cm.cfg.OnDisconnection != nil {
spawn(func() {
cm.cfg.OnDisconnection(connReq)
})
}
// All internal state has been cleaned up, if
// this connection is being removed, we will
// make no further attempts with this request.
if !msg.retry {
connReq.updateState(ConnDisconnected)
continue
}
// Otherwise, we will attempt a reconnection.
// The connection request is re added to the
// pending map, so that subsequent processing
// of connections and failures do not ignore
// the request.
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %s",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq, nil)
case handleFailed:
connReq := msg.c
if _, ok := pending[connReq.id]; !ok {
log.Debugf("Ignoring connection for "+
"canceled conn req: %s", connReq)
continue
}
connReq.updateState(ConnFailing)
if shouldWriteConnFailedLog(msg.err) {
log.Debugf("Failed to connect to %s: %s",
connReq, msg.err)
}
cm.handleFailedConn(connReq, msg.err)
if cm.cfg.OnConnectionFailed != nil {
cm.cfg.OnConnectionFailed(connReq)
}
}
case <-cm.quit:
break out
}
}
cm.wg.Done()
log.Trace("Connection handler done")
}
// NotifyConnectionRequestComplete notifies the connection
// manager that a peer had been successfully connected and
// marked as good.
func (cm *ConnManager) NotifyConnectionRequestComplete() {
cm.failedAttempts = 0
}
// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
c := &ConnReq{}
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
// Submit a request of a pending connection attempt to the connection
// manager. By registering the id before the connection is even
// established, we'll be able to later cancel the connection via the
// Remove method.
done := make(chan struct{})
select {
case cm.requests <- registerPending{c, done}:
case <-cm.quit:
return
}
// Wait for the registration to successfully add the pending conn req to
// the conn manager's internal state.
select {
case <-done:
case <-cm.quit:
return
}
err := cm.associateAddressToConnReq(c)
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
case <-cm.quit:
}
return
}
cm.connect(c)
}
func (cm *ConnManager) associateAddressToConnReq(c *ConnReq) error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
addr, err := cm.getNewAddress()
if err != nil {
return err
}
cm.markAddressAsUsed(addr)
c.Addr = addr
return nil
}
// Connect assigns an id and dials a connection to the address of the
// connection request.
func (cm *ConnManager) Connect(c *ConnReq) error {
err := func() error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
if cm.isAddressUsed(c.Addr) {
return fmt.Errorf("address %s is already in use", c.Addr)
}
cm.markAddressAsUsed(c.Addr)
return nil
}()
if err != nil {
return err
}
cm.connect(c)
return nil
}
// connect assigns an id and dials a connection to the address of the
// connection request. This function assumes that the connection address
// has checked and already marked as used.
func (cm *ConnManager) connect(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if atomic.LoadUint64(&c.id) == 0 {
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
// Submit a request of a pending connection attempt to the
// connection manager. By registering the id before the
// connection is even established, we'll be able to later
// cancel the connection via the Remove method.
done := make(chan struct{})
select {
case cm.requests <- registerPending{c, done}:
case <-cm.quit:
return
}
// Wait for the registration to successfully add the pending
// conn req to the conn manager's internal state.
select {
case <-done:
case <-cm.quit:
return
}
}
log.Debugf("Attempting to connect to %s", c)
conn, err := cm.cfg.Dial(c.Addr)
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
case <-cm.quit:
}
return
}
select {
case cm.requests <- handleConnected{c, conn}:
case <-cm.quit:
}
}
// Disconnect disconnects the connection corresponding to the given connection
// id. If permanent, the connection will be retried with an increasing backoff
// duration.
func (cm *ConnManager) Disconnect(id uint64) {
	if atomic.LoadInt32(&cm.stop) != 0 {
		return
	}
	// Queue the disconnect for the request handler; give up silently if
	// the manager is shutting down. The second field is true here but
	// false in Remove — presumably the retry flag; confirm against the
	// handleDisconnected handler.
	select {
	case cm.requests <- handleDisconnected{id, true}:
	case <-cm.quit:
	}
}
// Remove removes the connection corresponding to the given connection id from
// known connections.
//
// NOTE: This method can also be used to cancel a lingering connection attempt
// that hasn't yet succeeded.
func (cm *ConnManager) Remove(id uint64) {
	if atomic.LoadInt32(&cm.stop) != 0 {
		return
	}
	// Same request as Disconnect but with the second field false, so the
	// handler drops the request instead of scheduling a retry.
	select {
	case cm.requests <- handleDisconnected{id, false}:
	case <-cm.quit:
	}
}
// listenHandler accepts incoming connections on a given listener. It must be
// run as a goroutine. It loops until the manager's stop flag is set and
// signals cm.wg on exit.
func (cm *ConnManager) listenHandler(listener net.Listener) {
	log.Infof("Server listening on %s", listener.Addr())
	for atomic.LoadInt32(&cm.stop) == 0 {
		conn, err := listener.Accept()
		if err != nil {
			// Only log the error if not forcibly shutting down.
			if atomic.LoadInt32(&cm.stop) == 0 {
				log.Errorf("Can't accept connection: %s", err)
			}
			continue
		}
		// Hand the accepted connection to the caller's callback on its
		// own goroutine so Accept can be called again immediately.
		spawn(func() {
			cm.cfg.OnAccept(conn)
		})
	}
	cm.wg.Done()
	log.Tracef("Listener handler done for %s", listener.Addr())
}
// Start launches the connection manager and begins connecting to the network.
// Calling Start more than once is a no-op.
func (cm *ConnManager) Start() {
	// Already started?
	if atomic.AddInt32(&cm.start, 1) != 1 {
		return
	}
	log.Trace("Connection manager started")
	cm.wg.Add(1)
	spawn(cm.connHandler)
	// Start all the listeners so long as the caller requested them and
	// provided a callback to be invoked when connections are accepted.
	if cm.cfg.OnAccept != nil {
		for _, listener := range cm.cfg.Listeners {
			// Declaring this variable is necessary as it needs be declared in the same
			// scope of the anonymous function below it.
			listenerCopy := listener
			cm.wg.Add(1)
			spawn(func() {
				cm.listenHandler(listenerCopy)
			})
		}
	}
	// Spawn one outbound connection request for every outbound slot still
	// unfilled, starting from the current connection request count.
	for i := atomic.LoadUint64(&cm.connReqCount); i < uint64(cm.cfg.TargetOutbound); i++ {
		spawn(cm.NewConnReq)
	}
}
// Wait blocks until the connection manager halts gracefully, i.e. until
// the connHandler goroutine and all listener handlers started by Start
// have finished.
func (cm *ConnManager) Wait() {
	cm.wg.Wait()
}
// Stop gracefully shuts down the connection manager. The atomic counter
// guarantees the shutdown sequence runs at most once; later calls only
// log a warning.
func (cm *ConnManager) Stop() {
	if atomic.AddInt32(&cm.stop, 1) != 1 {
		log.Warnf("Connection manager already stopped")
		return
	}
	// Stop all the listeners. There will not be any listeners if
	// listening is disabled.
	for _, listener := range cm.cfg.Listeners {
		// Ignore the error since this is shutdown and there is no way
		// to recover anyways.
		_ = listener.Close()
	}
	// Closing quit releases every goroutine currently blocked on a
	// select against cm.quit.
	close(cm.quit)
	log.Trace("Connection manager stopped")
}
// getNewAddress draws an address for a new outbound connection from the
// address manager. It makes up to 100 attempts, progressively relaxing
// its criteria (address recency after 30 tries, non-default ports after
// 50 tries), and returns ErrNoAddress if no candidate qualifies.
// NOTE(review): callers appear expected to hold addressMtx (its visible
// caller, associateAddressToConnReq, does) — confirm.
func (cm *ConnManager) getNewAddress() (*net.TCPAddr, error) {
	for tries := 0; tries < 100; tries++ {
		addr := cm.cfg.AddrManager.GetAddress()
		if addr == nil {
			break
		}
		// Check if there's already a connection to the same address.
		netAddr := addr.NetAddress().TCPAddress()
		if cm.isAddressUsed(netAddr) {
			continue
		}
		// Address will not be invalid, local or unroutable
		// because addrmanager rejects those on addition.
		// Just check that we don't already have an address
		// in the same group so that we are not connecting
		// to the same network segment at the expense of
		// others.
		//
		// Networks that accept unroutable connections are exempt
		// from this rule, since they're meant to run within a
		// private subnet, like 10.0.0.0/16.
		if !config.ActiveConfig().NetParams().AcceptUnroutable && cm.isOutboundGroupUsed(netAddr) {
			continue
		}
		// only allow recent nodes (10mins) after we failed 30
		// times
		if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
			continue
		}
		// allow nondefault ports after 50 failed tries.
		if tries < 50 && fmt.Sprintf("%d", netAddr.Port) !=
			config.ActiveConfig().NetParams().DefaultPort {
			continue
		}
		return netAddr, nil
	}
	return nil, ErrNoAddress
}
// New returns a new connection manager.
// Use Start to start connecting to the network.
func New(cfg *Config) (*ConnManager, error) {
	// Validate the mandatory configuration fields up front.
	switch {
	case cfg.Dial == nil:
		return nil, errors.WithStack(ErrDialNil)
	case cfg.AddrManager == nil:
		return nil, errors.WithStack(ErrAddressManagerNil)
	}
	// Default to sane values
	if cfg.RetryDuration <= 0 {
		cfg.RetryDuration = defaultRetryDuration
	}
	manager := &ConnManager{
		cfg:                *cfg, // Copy so caller can't mutate
		requests:           make(chan interface{}),
		quit:               make(chan struct{}),
		usedAddresses:      make(map[string]struct{}),
		usedOutboundGroups: make(map[string]int64),
	}
	return manager, nil
}

View File

@ -1,980 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"fmt"
"io"
"io/ioutil"
"net"
"sync/atomic"
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
)
// init shortens the maximum retry duration so that retry-related tests
// in this file complete quickly.
func init() {
	// Override the max retry duration when running tests.
	maxRetryDuration = 2 * time.Millisecond
}
// mockAddr mocks a network address. Its Network/String methods satisfy
// net.Addr, letting it back mockConn and mockListener.
type mockAddr struct {
	net, address string
}

// Network returns the mock network name.
func (m mockAddr) Network() string { return m.net }

// String returns the mock address string.
func (m mockAddr) String() string { return m.address }
// mockConn mocks a network connection by implementing the net.Conn interface.
type mockConn struct {
	io.Reader
	io.Writer
	io.Closer
	// local network, address for the connection.
	lnet, laddr string
	// remote network, address for the connection.
	rAddr net.Addr
}

// LocalAddr returns the local address for the connection.
func (c mockConn) LocalAddr() net.Addr {
	return &mockAddr{c.lnet, c.laddr}
}

// RemoteAddr returns the remote address for the connection.
func (c mockConn) RemoteAddr() net.Addr {
	return &mockAddr{c.rAddr.Network(), c.rAddr.String()}
}

// Close handles closing the connection.
func (c mockConn) Close() error {
	return nil
}

// SetDeadline is a no-op; mock connections have no deadlines.
func (c mockConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op; mock connections have no deadlines.
func (c mockConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op; mock connections have no deadlines.
func (c mockConn) SetWriteDeadline(t time.Time) error { return nil }
// mockDialer mocks the net.Dial interface by returning a mock connection to
// the given address.
func mockDialer(addr net.Addr) (net.Conn, error) {
	// Back the mock connection with an in-memory pipe so reads and
	// writes behave like a real stream.
	r, w := io.Pipe()
	c := &mockConn{rAddr: addr}
	c.Reader = r
	c.Writer = w
	return c, nil
}
// TestNewConfig tests that new ConnManager config is validated as expected.
func TestNewConfig(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	// A config without a Dial function must be rejected.
	_, err := New(&Config{})
	if !errors.Is(err, ErrDialNil) {
		t.Fatalf("New expected error: %s, got %s", ErrDialNil, err)
	}
	// A config without an address manager must be rejected.
	_, err = New(&Config{
		Dial: mockDialer,
	})
	if !errors.Is(err, ErrAddressManagerNil) {
		t.Fatalf("New expected error: %s, got %s", ErrAddressManagerNil, err)
	}
	// A minimal config with both Dial and AddrManager must be accepted.
	amgr, teardown := addressManagerForTest(t, "TestNewConfig", 10)
	defer teardown()
	_, err = New(&Config{
		Dial:        mockDialer,
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("New unexpected error: %v", err)
	}
}
// TestStartStop tests that the connection manager starts and stops as
// expected.
func TestStartStop(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	connected := make(chan *ConnReq)
	disconnected := make(chan *ConnReq)
	amgr, teardown := addressManagerForTest(t, "TestStartStop", 10)
	defer teardown()
	cmgr, err := New(&Config{
		TargetOutbound: 1,
		AddrManager:    amgr,
		Dial:           mockDialer,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
		OnDisconnection: func(c *ConnReq) {
			disconnected <- c
		},
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	gotConnReq := <-connected
	cmgr.Stop()
	// Stopping an already-stopped manager should be a no-op.
	cmgr.Stop()
	// Everything below happens after Stop, so it should be ignored: the
	// new request must not be assigned an id, and Disconnect/Remove must
	// not fire the OnDisconnection callback.
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	err = cmgr.Connect(cr)
	if err != nil {
		t.Fatalf("Connect error: %s", err)
	}
	if cr.ID() != 0 {
		t.Fatalf("start/stop: got id: %v, want: 0", cr.ID())
	}
	cmgr.Disconnect(gotConnReq.ID())
	cmgr.Remove(gotConnReq.ID())
	select {
	case <-disconnected:
		t.Fatalf("start/stop: unexpected disconnection")
	case <-time.Tick(10 * time.Millisecond):
		break
	}
}
// overrideActiveConfig swaps the global active config for a simnet one
// and returns a restore function that reinstates the original config.
func overrideActiveConfig() func() {
	originalActiveCfg := config.ActiveConfig()
	config.SetActiveConfig(&config.Config{
		Flags: &config.Flags{
			NetworkFlags: config.NetworkFlags{
				ActiveNetParams: &dagconfig.SimnetParams},
		},
	})
	return func() {
		// Give some extra time to all open NewConnReq goroutines
		// to finish before restoring the active config to prevent
		// potential panics.
		time.Sleep(10 * time.Millisecond)
		config.SetActiveConfig(originalActiveCfg)
	}
}
// addressManagerForTest creates an address manager backed by a temporary
// database and pre-populated with numAddresses addresses. The returned
// teardown function must be called when done.
func addressManagerForTest(t *testing.T, testName string, numAddresses uint8) (*addrmgr.AddrManager, func()) {
	amgr, teardown := createEmptyAddressManagerForTest(t, testName)
	for i := uint8(0); i < numAddresses; i++ {
		// Vary the second octet so each generated address is distinct.
		ip := fmt.Sprintf("173.%d.115.66:16511", i)
		err := amgr.AddAddressByIP(ip, nil)
		if err != nil {
			t.Fatalf("AddAddressByIP unexpectedly failed to add IP %s: %s", ip, err)
		}
	}
	return amgr, teardown
}
// createEmptyAddressManagerForTest opens a fresh database in a temp
// directory and returns an empty address manager plus a teardown that
// closes the database.
func createEmptyAddressManagerForTest(t *testing.T, testName string) (*addrmgr.AddrManager, func()) {
	path, err := ioutil.TempDir("", fmt.Sprintf("%s-database", testName))
	if err != nil {
		t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+
			"failed: %s", err)
	}
	err = dbaccess.Open(path)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
	}
	return addrmgr.New(nil, nil), func() {
		// Wait for the connection manager to finish, so it'll
		// have access to the address manager as long as it's
		// alive.
		time.Sleep(10 * time.Millisecond)
		err := dbaccess.Close()
		if err != nil {
			t.Fatalf("error closing the database: %s", err)
		}
	}
}
// TestConnectMode tests that the connection manager works in the connect mode.
//
// In connect mode, automatic connections are disabled, so we test that
// requests using Connect are handled and that no other connections are made.
func TestConnectMode(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	connected := make(chan *ConnReq)
	amgr, teardown := addressManagerForTest(t, "TestConnectMode", 10)
	defer teardown()
	cmgr, err := New(&Config{
		TargetOutbound: 0,
		Dial:           mockDialer,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	cmgr.Start()
	// Check Connect's error return (it was silently discarded before),
	// consistently with TestStartStop; an error here would otherwise make
	// the test hang on the connected channel.
	err = cmgr.Connect(cr)
	if err != nil {
		t.Fatalf("Connect error: %s", err)
	}
	gotConnReq := <-connected
	wantID := cr.ID()
	gotID := gotConnReq.ID()
	if gotID != wantID {
		t.Fatalf("connect mode: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID)
	}
	gotState := cr.State()
	wantState := ConnEstablished
	if gotState != wantState {
		t.Fatalf("connect mode: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
	}
	// With TargetOutbound of 0 no automatic connection should appear.
	select {
	case c := <-connected:
		t.Fatalf("connect mode: got unexpected connection - %v", c.Addr)
	case <-time.After(time.Millisecond):
		break
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestTargetOutbound tests the target number of outbound connections.
//
// We wait until all connections are established, then test they there are the
// only connections made.
func TestTargetOutbound(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	const numAddressesInAddressManager = 10
	targetOutbound := uint32(numAddressesInAddressManager - 2)
	connected := make(chan *ConnReq)
	// Use the named constant rather than repeating the magic literal 10,
	// so the address-manager size and the derived target stay in sync.
	amgr, teardown := addressManagerForTest(t, "TestTargetOutbound", numAddressesInAddressManager)
	defer teardown()
	cmgr, err := New(&Config{
		TargetOutbound: targetOutbound,
		Dial:           mockDialer,
		AddrManager:    amgr,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	// Wait for exactly targetOutbound connections to be established.
	for i := uint32(0); i < targetOutbound; i++ {
		<-connected
	}
	// No further connections should be made beyond the target.
	select {
	case c := <-connected:
		t.Fatalf("target outbound: got unexpected connection - %v", c.Addr)
	case <-time.After(time.Millisecond):
		break
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestDuplicateOutboundConnections tests that connection requests cannot use an already used address.
// It checks it by creating one connection request for each address in the address manager, so that
// the next connection request will have to fail because no unused address will be available.
func TestDuplicateOutboundConnections(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	const numAddressesInAddressManager = 10
	targetOutbound := uint32(numAddressesInAddressManager - 1)
	connected := make(chan struct{})
	failedConnections := make(chan struct{})
	// Use the named constant rather than repeating the magic literal 10,
	// so the address-manager size and the derived target stay in sync.
	amgr, teardown := addressManagerForTest(t, "TestDuplicateOutboundConnections", numAddressesInAddressManager)
	defer teardown()
	cmgr, err := New(&Config{
		TargetOutbound: targetOutbound,
		Dial:           mockDialer,
		AddrManager:    amgr,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- struct{}{}
		},
		OnConnectionFailed: func(_ *ConnReq) {
			failedConnections <- struct{}{}
		},
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	for i := uint32(0); i < targetOutbound; i++ {
		<-connected
	}
	time.Sleep(time.Millisecond)
	// Here we check that making a manual connection request beyond the target outbound connection
	// doesn't fail, so we can know that the reason such connection request will fail is an address
	// related issue.
	cmgr.NewConnReq()
	select {
	case <-connected:
		break
	case <-time.After(time.Millisecond):
		t.Fatalf("connection request unexpectedly didn't connect")
	}
	select {
	case <-failedConnections:
		t.Fatalf("a connection request unexpectedly failed")
	case <-time.After(time.Millisecond):
		break
	}
	// After we created numAddressesInAddressManager connection requests, this request should fail
	// because there aren't any more available addresses.
	cmgr.NewConnReq()
	select {
	case <-connected:
		t.Fatalf("connection request unexpectedly succeeded")
	case <-time.After(time.Millisecond):
		t.Fatalf("connection request didn't fail as expected")
	case <-failedConnections:
		break
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestSameOutboundGroupConnections tests that connection requests cannot use an address with an already used
// address CIDR group.
// It checks it by creating an address manager with only two addresses, that both belong to the same CIDR group
// and checks that the second connection request fails.
func TestSameOutboundGroupConnections(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	amgr, teardown := createEmptyAddressManagerForTest(t, "TestSameOutboundGroupConnections")
	defer teardown()
	// Both addresses differ only in the last octet, so they fall into the
	// same outbound group.
	err := amgr.AddAddressByIP("173.190.115.66:16511", nil)
	if err != nil {
		t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
	}
	err = amgr.AddAddressByIP("173.190.115.67:16511", nil)
	if err != nil {
		t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
	}
	connected := make(chan struct{})
	failedConnections := make(chan struct{})
	cmgr, err := New(&Config{
		TargetOutbound: 0,
		Dial:           mockDialer,
		AddrManager:    amgr,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- struct{}{}
		},
		OnConnectionFailed: func(_ *ConnReq) {
			failedConnections <- struct{}{}
		},
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	// The first request should succeed and claim the group.
	cmgr.NewConnReq()
	select {
	case <-connected:
		break
	case <-time.After(time.Millisecond):
		t.Fatalf("connection request unexpectedly didn't connect")
	}
	select {
	case <-failedConnections:
		t.Fatalf("a connection request unexpectedly failed")
	case <-time.After(time.Millisecond):
		break
	}
	// The second request must fail: its only candidate shares the used group.
	cmgr.NewConnReq()
	select {
	case <-connected:
		t.Fatalf("connection request unexpectedly succeeded")
	case <-time.After(time.Millisecond):
		t.Fatalf("connection request didn't fail as expected")
	case <-failedConnections:
		break
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestRetryPermanent tests that permanent connection requests are retried.
//
// We make a permanent connection request using Connect, disconnect it using
// Disconnect and we wait for it to be connected back.
func TestRetryPermanent(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	connected := make(chan *ConnReq)
	disconnected := make(chan *ConnReq)
	amgr, teardown := addressManagerForTest(t, "TestRetryPermanent", 10)
	defer teardown()
	cmgr, err := New(&Config{
		RetryDuration:  time.Millisecond,
		TargetOutbound: 0,
		Dial:           mockDialer,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
		OnDisconnection: func(c *ConnReq) {
			disconnected <- c
		},
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	go cmgr.Connect(cr)
	cmgr.Start()
	// The request should first come up as established.
	gotConnReq := <-connected
	wantID := cr.ID()
	gotID := gotConnReq.ID()
	if gotID != wantID {
		t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID)
	}
	gotState := cr.State()
	wantState := ConnEstablished
	if gotState != wantState {
		t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
	}
	// Disconnecting a permanent request should leave it pending a retry.
	cmgr.Disconnect(cr.ID())
	gotConnReq = <-disconnected
	wantID = cr.ID()
	gotID = gotConnReq.ID()
	if gotID != wantID {
		t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID)
	}
	gotState = cr.State()
	wantState = ConnPending
	if gotState != wantState {
		// There is a small chance that connection has already been established,
		// so check for that as well
		if gotState != ConnEstablished {
			t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
		}
	}
	// The retry should eventually re-establish the connection.
	gotConnReq = <-connected
	wantID = cr.ID()
	gotID = gotConnReq.ID()
	if gotID != wantID {
		t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID)
	}
	gotState = cr.State()
	wantState = ConnEstablished
	if gotState != wantState {
		t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
	}
	// Removing the request entirely should disconnect it without a retry.
	cmgr.Remove(cr.ID())
	gotConnReq = <-disconnected
	// Wait for status to be updated
	time.Sleep(10 * time.Millisecond)
	wantID = cr.ID()
	gotID = gotConnReq.ID()
	if gotID != wantID {
		t.Fatalf("retry: %v - want ID %v, got ID %v", cr.Addr, wantID, gotID)
	}
	gotState = cr.State()
	wantState = ConnDisconnected
	if gotState != wantState {
		t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestMaxRetryDuration tests the maximum retry duration.
//
// We have a timed dialer which initially returns err but after RetryDuration
// hits maxRetryDuration returns a mock conn.
func TestMaxRetryDuration(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	networkUp := make(chan struct{})
	// Simulate the network coming up 5ms into the test.
	time.AfterFunc(5*time.Millisecond, func() {
		close(networkUp)
	})
	// timedDialer fails until networkUp is closed, then behaves like
	// mockDialer.
	timedDialer := func(addr net.Addr) (net.Conn, error) {
		select {
		case <-networkUp:
			return mockDialer(addr)
		default:
			return nil, errors.New("network down")
		}
	}
	amgr, teardown := addressManagerForTest(t, "TestMaxRetryDuration", 10)
	defer teardown()
	connected := make(chan *ConnReq)
	cmgr, err := New(&Config{
		RetryDuration:  time.Millisecond,
		TargetOutbound: 0,
		Dial:           timedDialer,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	go cmgr.Connect(cr)
	cmgr.Start()
	// retry in 1ms
	// retry in 2ms - max retry duration reached
	// retry in 2ms - timedDialer returns mockDial
	select {
	case <-connected:
	case <-time.Tick(100 * time.Millisecond):
		t.Fatalf("max retry duration: connection timeout")
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestNetworkFailure tests that the connection manager handles a network
// failure gracefully.
func TestNetworkFailure(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	var dials uint32
	// errDialer always fails and counts how many dials were attempted.
	errDialer := func(net net.Addr) (net.Conn, error) {
		atomic.AddUint32(&dials, 1)
		return nil, errors.New("network down")
	}
	amgr, teardown := addressManagerForTest(t, "TestNetworkFailure", 10)
	defer teardown()
	cmgr, err := New(&Config{
		TargetOutbound: 5,
		RetryDuration:  5 * time.Millisecond,
		Dial:           errDialer,
		AddrManager:    amgr,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			t.Fatalf("network failure: got unexpected connection - %v", c.Addr)
		},
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	time.Sleep(10 * time.Millisecond)
	cmgr.Stop()
	cmgr.Wait()
	// The manager must back off rather than dial in a tight loop.
	wantMaxDials := uint32(75)
	if atomic.LoadUint32(&dials) > wantMaxDials {
		t.Fatalf("network failure: unexpected number of dials - got %v, want < %v",
			atomic.LoadUint32(&dials), wantMaxDials)
	}
}
// TestStopFailed tests that failed connections are ignored after connmgr is
// stopped.
//
// We have a dialer which sets the stop flag on the conn manager and returns an
// err so that the handler assumes that the conn manager is stopped and ignores
// the failure.
func TestStopFailed(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	done := make(chan struct{}, 1)
	// waitDialer signals the test goroutine, waits a moment so the stop
	// flag can be flipped mid-dial, then fails.
	waitDialer := func(addr net.Addr) (net.Conn, error) {
		done <- struct{}{}
		time.Sleep(time.Millisecond)
		return nil, errors.New("network down")
	}
	amgr, teardown := addressManagerForTest(t, "TestStopFailed", 10)
	defer teardown()
	cmgr, err := New(&Config{
		Dial:        waitDialer,
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	go func() {
		<-done
		// Flip the stop flag while the dial is in flight so the failure
		// is observed as happening after shutdown, then stop for real.
		atomic.StoreInt32(&cmgr.stop, 1)
		time.Sleep(2 * time.Millisecond)
		atomic.StoreInt32(&cmgr.stop, 0)
		cmgr.Stop()
	}()
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	go cmgr.Connect(cr)
	cmgr.Wait()
}
// TestRemovePendingConnection tests that it's possible to cancel a pending
// connection, removing its internal state from the ConnMgr.
func TestRemovePendingConnection(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	// Create a ConnMgr instance with an instance of a dialer that'll never
	// succeed.
	wait := make(chan struct{})
	indefiniteDialer := func(addr net.Addr) (net.Conn, error) {
		<-wait
		return nil, errors.Errorf("error")
	}
	amgr, teardown := addressManagerForTest(t, "TestRemovePendingConnection", 10)
	defer teardown()
	cmgr, err := New(&Config{
		Dial:        indefiniteDialer,
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	// Establish a connection request to a random IP we've chosen.
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
		Permanent: true,
	}
	go cmgr.Connect(cr)
	time.Sleep(10 * time.Millisecond)
	if cr.State() != ConnPending {
		t.Fatalf("pending request hasn't been registered, status: %v",
			cr.State())
	}
	// The request launched above will actually never be able to establish
	// a connection. So we'll cancel it _before_ it's able to be completed.
	cmgr.Remove(cr.ID())
	time.Sleep(10 * time.Millisecond)
	// Now examine the status of the connection request, it should read a
	// status of failed.
	if cr.State() != ConnCanceled {
		t.Fatalf("request wasn't canceled, status is: %v", cr.State())
	}
	// Unblock the dialer so its goroutine can exit.
	close(wait)
	cmgr.Stop()
	cmgr.Wait()
}
// TestCancelIgnoreDelayedConnection tests that a canceled connection request will
// not execute the on connection callback, even if an outstanding retry
// succeeds.
func TestCancelIgnoreDelayedConnection(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	retryTimeout := 10 * time.Millisecond
	// Setup a dialer that will continue to return an error until the
	// connect chan is signaled, the dial attempt immediately after will
	// succeed in returning a connection.
	connect := make(chan struct{})
	failingDialer := func(addr net.Addr) (net.Conn, error) {
		select {
		case <-connect:
			return mockDialer(addr)
		default:
		}
		return nil, errors.Errorf("error")
	}
	connected := make(chan *ConnReq)
	amgr, teardown := addressManagerForTest(t, "TestCancelIgnoreDelayedConnection", 10)
	defer teardown()
	cmgr, err := New(&Config{
		Dial:          failingDialer,
		RetryDuration: retryTimeout,
		OnConnection: func(c *ConnReq, conn net.Conn) {
			connected <- c
		},
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	// Establish a connection request to a random IP we've chosen.
	cr := &ConnReq{
		Addr: &net.TCPAddr{
			IP:   net.ParseIP("127.0.0.1"),
			Port: 18555,
		},
	}
	// Check Connect's error return (it was silently discarded before),
	// consistently with the other tests in this file.
	err = cmgr.Connect(cr)
	if err != nil {
		t.Fatalf("Connect error: %s", err)
	}
	// Allow for the first retry timeout to elapse.
	time.Sleep(2 * retryTimeout)
	// The connection should be marked as failed, even after reattempting
	// to connect.
	if cr.State() != ConnFailing {
		t.Fatalf("failing request should have status failed, status: %v",
			cr.State())
	}
	// Remove the connection, and then immediately allow the next connection
	// to succeed.
	cmgr.Remove(cr.ID())
	close(connect)
	// Allow the connection manager to process the removal.
	time.Sleep(5 * time.Millisecond)
	// Now examine the status of the connection request, it should read a
	// status of canceled.
	if cr.State() != ConnCanceled {
		t.Fatalf("request wasn't canceled, status is: %v", cr.State())
	}
	// Finally, the connection manager should not signal the on-connection
	// callback, since we explicitly canceled this request. We give a
	// generous window to ensure the connection manager's linear backoff is
	// allowed to properly elapse.
	select {
	case <-connected:
		t.Fatalf("on-connect should not be called for canceled req")
	case <-time.After(5 * retryTimeout):
	}
	cmgr.Stop()
	cmgr.Wait()
}
// mockListener implements the net.Listener interface and is used to test
// code that deals with net.Listeners without having to actually make any real
// connections.
type mockListener struct {
	localAddr   string
	provideConn chan net.Conn
}

// Accept returns a mock connection when it receives a signal via the Connect
// function.
//
// This is part of the net.Listener interface.
func (m *mockListener) Accept() (net.Conn, error) {
	// Ranging over the channel blocks until Connect supplies a
	// connection; when Close closes the channel the loop exits and an
	// error is returned.
	for conn := range m.provideConn {
		return conn, nil
	}
	return nil, errors.New("network connection closed")
}

// Close closes the mock listener which will cause any blocked Accept
// operations to be unblocked and return errors.
//
// This is part of the net.Listener interface.
func (m *mockListener) Close() error {
	close(m.provideConn)
	return nil
}

// Addr returns the address the mock listener was configured with.
//
// This is part of the net.Listener interface.
func (m *mockListener) Addr() net.Addr {
	return &mockAddr{"tcp", m.localAddr}
}

// Connect fakes a connection to the mock listener from the provided remote
// address. It will cause the Accept function to return a mock connection
// configured with the provided remote address and the local address for the
// mock listener.
func (m *mockListener) Connect(ip string, port int) {
	m.provideConn <- &mockConn{
		laddr: m.localAddr,
		lnet:  "tcp",
		rAddr: &net.TCPAddr{
			IP:   net.ParseIP(ip),
			Port: port,
		},
	}
}

// newMockListener returns a new mock listener for the provided local address
// and port. No ports are actually opened.
func newMockListener(localAddr string) *mockListener {
	return &mockListener{
		localAddr:   localAddr,
		provideConn: make(chan net.Conn),
	}
}
// TestListeners ensures providing listeners to the connection manager along
// with an accept callback works properly.
func TestListeners(t *testing.T) {
	restoreConfig := overrideActiveConfig()
	defer restoreConfig()
	// Setup a connection manager with a couple of mock listeners that
	// notify a channel when they receive mock connections.
	receivedConns := make(chan net.Conn)
	listener1 := newMockListener("127.0.0.1:16111")
	listener2 := newMockListener("127.0.0.1:9333")
	listeners := []net.Listener{listener1, listener2}
	amgr, teardown := addressManagerForTest(t, "TestListeners", 10)
	defer teardown()
	cmgr, err := New(&Config{
		Listeners: listeners,
		OnAccept: func(conn net.Conn) {
			receivedConns <- conn
		},
		Dial:        mockDialer,
		AddrManager: amgr,
	})
	if err != nil {
		t.Fatalf("unexpected error from New: %s", err)
	}
	cmgr.Start()
	// Fake a couple of mock connections to each of the listeners.
	go func() {
		for i, listener := range listeners {
			l := listener.(*mockListener)
			l.Connect("127.0.0.1", 10000+i*2)
			l.Connect("127.0.0.1", 10000+i*2+1)
		}
	}()
	// Tally the receive connections to ensure the expected number are
	// received. Also, fail the test after a timeout so it will not hang
	// forever should the test not work.
	expectedNumConns := len(listeners) * 2
	var numConns int
out:
	for {
		select {
		case <-receivedConns:
			numConns++
			if numConns == expectedNumConns {
				break out
			}
		case <-time.After(time.Millisecond * 50):
			t.Fatalf("Timeout waiting for %d expected connections",
				expectedNumConns)
		}
	}
	cmgr.Stop()
	cmgr.Wait()
}
// TestConnReqString ensures that ConnReq.String() does not crash, both for
// a fully-populated request and for the zero value.
func TestConnReqString(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Fatalf("ConnReq.String crashed %v", r)
		}
	}()
	requests := []*ConnReq{
		{
			Addr: &net.TCPAddr{
				IP:   net.ParseIP("127.0.0.1"),
				Port: 18555,
			},
			Permanent: true,
		},
		{},
	}
	for _, request := range requests {
		_ = request.String()
	}
}

View File

@ -1,23 +0,0 @@
/*
Package connmgr implements a generic Kaspa network connection manager.

Connection Manager Overview

Connection Manager handles all the general connection concerns such as
maintaining a set number of outbound connections, sourcing peers, banning,
limiting max connections, etc.

The package provides a generic connection manager which is able to accept
connection requests from a source or a set of given addresses, dial them and
notify the caller on connections. The main intended use is to initialize a pool
of active connections and maintain them to remain connected to the P2P network.

In addition, the connection manager provides the following utilities:

 - Notifications on connections or disconnections
 - Handle failures and retry new addresses from the source
 - Connect only to specified addresses
 - Permanent connections with increasing backoff retry timers
 - Disconnect or Remove an established connection
*/
package connmgr

View File

@ -1,144 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"fmt"
"math"
"sync"
"time"
)
const (
// Halflife defines the time (in seconds) by which the transient part
// of the ban score decays to one half of it's original value.
Halflife = 60
// lambda is the decaying constant.
lambda = math.Ln2 / Halflife
// Lifetime defines the maximum age of the transient part of the ban
// score to be considered a non-zero score (in seconds).
Lifetime = 1800
// precomputedLen defines the amount of decay factors (one per second) that
// should be precomputed at initialization.
precomputedLen = 64
)
// precomputedFactor stores precomputed exponential decay factors for the first
// 'precomputedLen' seconds starting from t == 0.
var precomputedFactor [precomputedLen]float64
// init precomputes decay factors.
func init() {
for i := range precomputedFactor {
precomputedFactor[i] = math.Exp(-1.0 * float64(i) * lambda)
}
}
// decayFactor returns the decay factor at t seconds, using precalculated values
// if available, or calculating the factor if needed.
func decayFactor(t int64) float64 {
if t < precomputedLen {
return precomputedFactor[t]
}
return math.Exp(-1.0 * float64(t) * lambda)
}
// DynamicBanScore provides dynamic ban scores consisting of a persistent and a
// decaying component.
//
// The decaying score enables the creation of evasive logic which handles
// misbehaving peers (especially application layer DoS attacks) gracefully
// by disconnecting and banning peers attempting various kinds of flooding.
// DynamicBanScore allows these two approaches to be used in tandem.
//
// Zero value: Values of type DynamicBanScore are immediately ready for use upon
// declaration.
type DynamicBanScore struct {
lastUnix int64
transient float64
persistent uint32
mtx sync.Mutex
}
// String returns the ban score as a human-readable string.
func (s *DynamicBanScore) String() string {
s.mtx.Lock()
defer s.mtx.Unlock()
r := fmt.Sprintf("persistent %d + transient %f at %d = %d as of now",
s.persistent, s.transient, s.lastUnix, s.Int())
return r
}
// Int returns the current ban score, the sum of the persistent and decaying
// scores.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Int() uint32 {
s.mtx.Lock()
defer s.mtx.Unlock()
r := s.int(time.Now())
return r
}
// Increase increases both the persistent and decaying scores by the values
// passed as parameters. The resulting score is returned.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.increase(persistent, transient, time.Now())
}
// Reset set both persistent and decaying scores to zero.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Reset() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	// Zero every score field; the mutex itself must not be touched.
	s.lastUnix = 0
	s.transient = 0
	s.persistent = 0
}
// int returns the ban score, the sum of the persistent and decaying scores at a
// given point in time.
//
// This function is not safe for concurrent access. It is intended to be used
// internally and during testing.
func (s *DynamicBanScore) int(t time.Time) uint32 {
	elapsed := t.Unix() - s.lastUnix
	// The transient part contributes nothing if it has rounded down to zero,
	// if the clock ran backwards, or if its maximum lifetime has passed.
	if s.transient < 1 || elapsed < 0 || elapsed > Lifetime {
		return s.persistent
	}
	return s.persistent + uint32(s.transient*decayFactor(elapsed))
}
// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.
func (s *DynamicBanScore) increase(persistent, transient uint32, t time.Time) uint32 {
	s.persistent += persistent
	if transient > 0 {
		now := t.Unix()
		elapsed := now - s.lastUnix
		switch {
		case elapsed > Lifetime:
			// The old transient score has fully expired; start over.
			s.transient = 0
		case s.transient > 1 && elapsed > 0:
			// Decay the existing transient score before adding to it.
			s.transient *= decayFactor(elapsed)
		}
		s.transient += float64(transient)
		s.lastUnix = now
	}
	return s.persistent + uint32(s.transient)
}

View File

@ -1,68 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"math"
"testing"
"time"
)
// TestDynamicBanScoreDecay tests the exponential decay implemented in
// DynamicBanScore.
func TestDynamicBanScoreDecay(t *testing.T) {
	var bs DynamicBanScore
	base := time.Now()

	if got := bs.increase(100, 50, base); got != 150 {
		t.Errorf("Unexpected result %d after ban score increase.", got)
	}
	if got := bs.int(base.Add(time.Minute)); got != 125 {
		t.Errorf("Halflife check failed - %d instead of 125", got)
	}
	if got := bs.int(base.Add(7 * time.Minute)); got != 100 {
		t.Errorf("Decay after 7m - %d instead of 100", got)
	}
}
// TestDynamicBanScoreLifetime tests that DynamicBanScore properly yields zero
// once the maximum age is reached.
func TestDynamicBanScoreLifetime(t *testing.T) {
	var bs DynamicBanScore
	base := time.Now()
	bs.increase(0, math.MaxUint32, base)

	// 3, not 4 due to precision loss and truncating 3.999...
	if got := bs.int(base.Add(Lifetime * time.Second)); got != 3 {
		t.Errorf("Pre max age check with MaxUint32 failed - %d", got)
	}
	if got := bs.int(base.Add((Lifetime + 1) * time.Second)); got != 0 {
		t.Errorf("Zero after max age check failed - %d instead of 0", got)
	}
}
// TestDynamicBanScoreReset tests exported functions of DynamicBanScore. Exponential
// decay or other time based behavior is tested by other functions.
func TestDynamicBanScoreReset(t *testing.T) {
	var bs DynamicBanScore
	if bs.Int() != 0 {
		t.Errorf("Initial state is not zero.")
	}

	bs.Increase(100, 0)
	if got := bs.Int(); got != 100 {
		t.Errorf("Unexpected result %d after ban score increase.", got)
	}

	bs.Reset()
	if bs.Int() != 0 {
		t.Errorf("Failed to reset ban score.")
	}
}

View File

@ -5,9 +5,8 @@
package dagconfig package dagconfig
import ( import (
"time"
"github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire" "github.com/kaspanet/kaspad/wire"
) )
@ -29,10 +28,10 @@ var genesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, genesisTxOuts
// genesisHash is the hash of the first block in the block DAG for the main // genesisHash is the hash of the first block in the block DAG for the main
// network (genesis block). // network (genesis block).
var genesisHash = daghash.Hash{ var genesisHash = daghash.Hash{
0xb3, 0x5d, 0x34, 0x3c, 0xf6, 0xbb, 0xd5, 0xaf, 0xfa, 0x00, 0xbd, 0xcb, 0x46, 0x74, 0xc5, 0xdb,
0x40, 0x4b, 0xff, 0x3f, 0x83, 0x27, 0x71, 0x1e, 0xf7, 0x63, 0xcb, 0x78, 0x7a, 0x94, 0xc5, 0xbf,
0xe1, 0x83, 0xf6, 0x41, 0x32, 0x8c, 0xba, 0xe6, 0xd4, 0x81, 0xd3, 0x52, 0x2d, 0x79, 0xac, 0x57,
0xd3, 0xba, 0x13, 0xef, 0x7b, 0x7e, 0x61, 0x65, 0x73, 0xe6, 0x14, 0x7e, 0x15, 0xef, 0x85, 0x27,
} }
// genesisMerkleRoot is the hash of the first transaction in the genesis block // genesisMerkleRoot is the hash of the first transaction in the genesis block
@ -53,9 +52,9 @@ var genesisBlock = wire.MsgBlock{
HashMerkleRoot: &genesisMerkleRoot, HashMerkleRoot: &genesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{}, AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5edf4ce0, 0), Timestamp: mstime.UnixMilliseconds(0x1730a81bdb4),
Bits: 0x207fffff, Bits: 0x207fffff,
Nonce: 0, Nonce: 0x1,
}, },
Transactions: []*wire.MsgTx{genesisCoinbaseTx}, Transactions: []*wire.MsgTx{genesisCoinbaseTx},
} }
@ -78,10 +77,10 @@ var devnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, devnetG
// devGenesisHash is the hash of the first block in the block DAG for the development // devGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block). // network (genesis block).
var devnetGenesisHash = daghash.Hash{ var devnetGenesisHash = daghash.Hash{
0x50, 0x92, 0xd1, 0x1f, 0xaa, 0xba, 0xd3, 0x58, 0x2e, 0x03, 0x7d, 0x31, 0x09, 0x56, 0x82, 0x72,
0xa8, 0x22, 0xd7, 0xec, 0x8e, 0xe3, 0xf4, 0x26, 0x1d, 0x49, 0x39, 0xf3, 0x7d, 0xd5, 0xc8, 0xf4,
0x17, 0x18, 0x74, 0xd7, 0x87, 0x05, 0x9d, 0xed, 0xef, 0x4f, 0xcd, 0xeb, 0x1d, 0x95, 0xad, 0x6e,
0x33, 0xcd, 0xe1, 0x26, 0x1a, 0x69, 0x00, 0x00, 0x02, 0x4f, 0x52, 0xf2, 0xd6, 0x66, 0x00, 0x00,
} }
// devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block // devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@ -102,9 +101,9 @@ var devnetGenesisBlock = wire.MsgBlock{
HashMerkleRoot: &devnetGenesisMerkleRoot, HashMerkleRoot: &devnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{}, AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5edf4ce0, 0), Timestamp: mstime.UnixMilliseconds(0x17305b05694),
Bits: 0x1e7fffff, Bits: 0x1e7fffff,
Nonce: 0xb3ed, Nonce: 0x10bb,
}, },
Transactions: []*wire.MsgTx{devnetGenesisCoinbaseTx}, Transactions: []*wire.MsgTx{devnetGenesisCoinbaseTx},
} }
@ -127,10 +126,10 @@ var regtestGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, regtes
// devGenesisHash is the hash of the first block in the block DAG for the development // devGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block). // network (genesis block).
var regtestGenesisHash = daghash.Hash{ var regtestGenesisHash = daghash.Hash{
0xf8, 0x1d, 0xe9, 0x86, 0xa5, 0x60, 0xe0, 0x34, 0xda, 0x23, 0x61, 0x5e, 0xf6, 0x2a, 0x95, 0x27,
0x0f, 0x02, 0xaa, 0x8d, 0xea, 0x6f, 0x1f, 0xc6, 0x7f, 0x5a, 0x40, 0xd5, 0x91, 0x97, 0x1c, 0xef,
0x2a, 0xb4, 0x77, 0xbd, 0xca, 0xed, 0xad, 0x3c, 0xd5, 0x86, 0xac, 0xac, 0x82, 0xb3, 0xc9, 0x43,
0x99, 0xe6, 0x98, 0x7c, 0x7b, 0x5e, 0x00, 0x00, 0xd3, 0x49, 0x5f, 0x7e, 0x93, 0x0b, 0x35, 0x2d,
} }
// regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block // regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block
@ -151,9 +150,9 @@ var regtestGenesisBlock = wire.MsgBlock{
HashMerkleRoot: &regtestGenesisMerkleRoot, HashMerkleRoot: &regtestGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{}, AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5edf4ce0, 0), Timestamp: mstime.UnixMilliseconds(0x1730a958ac4),
Bits: 0x1e7fffff, Bits: 0x207fffff,
Nonce: 0x4a78, Nonce: 0x0,
}, },
Transactions: []*wire.MsgTx{regtestGenesisCoinbaseTx}, Transactions: []*wire.MsgTx{regtestGenesisCoinbaseTx},
} }
@ -175,10 +174,10 @@ var simnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, simnetG
// simnetGenesisHash is the hash of the first block in the block DAG for // simnetGenesisHash is the hash of the first block in the block DAG for
// the simnet (genesis block). // the simnet (genesis block).
var simnetGenesisHash = daghash.Hash{ var simnetGenesisHash = daghash.Hash{
0x34, 0x43, 0xed, 0xdc, 0xab, 0x0c, 0x39, 0x53, 0x86, 0x27, 0xdc, 0x5e, 0xa9, 0x38, 0xc7, 0xa5,
0xa2, 0xc5, 0x6d, 0x12, 0x4b, 0xc2, 0x41, 0x1c, 0x7a, 0x18, 0xcd, 0xe7, 0xda, 0xed, 0x13, 0xe0,
0x1a, 0x05, 0x24, 0xb4, 0xff, 0xeb, 0xe8, 0xbd, 0x24, 0x1b, 0xab, 0xfe, 0xbd, 0xe6, 0x6f, 0xd3,
0xee, 0x6e, 0x9a, 0x77, 0xc7, 0xbb, 0x70, 0x7d, 0x95, 0x34, 0x81, 0x1c, 0x57, 0xd1, 0xc4, 0x3f,
} }
// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block // simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@ -199,9 +198,9 @@ var simnetGenesisBlock = wire.MsgBlock{
HashMerkleRoot: &simnetGenesisMerkleRoot, HashMerkleRoot: &simnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{}, AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5ede5261, 0), Timestamp: mstime.UnixMilliseconds(0x173001df3d5),
Bits: 0x207fffff, Bits: 0x207fffff,
Nonce: 0x2, Nonce: 0x0,
}, },
Transactions: []*wire.MsgTx{simnetGenesisCoinbaseTx}, Transactions: []*wire.MsgTx{simnetGenesisCoinbaseTx},
} }
@ -221,10 +220,10 @@ var testnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, testne
// testnetGenesisHash is the hash of the first block in the block DAG for the test // testnetGenesisHash is the hash of the first block in the block DAG for the test
// network (genesis block). // network (genesis block).
var testnetGenesisHash = daghash.Hash{ var testnetGenesisHash = daghash.Hash{
0xFC, 0x21, 0x64, 0x1A, 0xB5, 0x59, 0x61, 0x8E, 0x34, 0x8c, 0x71, 0x99, 0x70, 0x13, 0x00, 0xe5,
0xF3, 0x9A, 0x95, 0xF1, 0xDA, 0x07, 0x79, 0xBD, 0xf5, 0x35, 0x98, 0x45, 0x89, 0xc7, 0xa2, 0xab,
0x11, 0x2F, 0x90, 0xFC, 0x8B, 0x33, 0x14, 0x8A, 0xd0, 0x8f, 0x26, 0x00, 0x9c, 0xc6, 0x6b, 0xa3,
0x90, 0x6B, 0x76, 0x08, 0x4B, 0x52, 0x00, 0x00, 0x20, 0x88, 0x86, 0x55, 0x3f, 0x61, 0x00, 0x00,
} }
// testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block // testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@ -245,9 +244,9 @@ var testnetGenesisBlock = wire.MsgBlock{
HashMerkleRoot: &testnetGenesisMerkleRoot, HashMerkleRoot: &testnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash, AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5efc2128, 0), Timestamp: mstime.UnixMilliseconds(0x1730a66a9d9),
Bits: 0x1e7fffff, Bits: 0x1e7fffff,
Nonce: 0x1124, Nonce: 0x162ca,
}, },
Transactions: []*wire.MsgTx{testnetGenesisCoinbaseTx}, Transactions: []*wire.MsgTx{testnetGenesisCoinbaseTx},
} }

View File

@ -133,7 +133,7 @@ func TestDevnetGenesisBlock(t *testing.T) {
t.Fatalf("TestDevnetGenesisBlock: Genesis block does not "+ t.Fatalf("TestDevnetGenesisBlock: Genesis block does not "+
"appear valid - got %v, want %v", "appear valid - got %v, want %v",
spew.Sdump(buf.Bytes()), spew.Sdump(buf.Bytes()),
spew.Sdump(simnetGenesisBlockBytes)) spew.Sdump(devnetGenesisBlockBytes))
} }
// Check hash of the block against expected hash. // Check hash of the block against expected hash.
@ -154,8 +154,8 @@ var genesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0xbd, 0x81, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xc4, 0x41, 0xe6, 0x78, 0x1d, 0xf7, 0xb3, 0x39, 0x66, 0x4d, 0x1a, 0x03, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x41, 0xe6, 0x78, 0x1d, 0xf7, 0xb3, 0x39, 0x66, 0x4d, 0x1a, 0x03,
@ -174,8 +174,8 @@ var regtestGenesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x8a, 0x95, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0x78, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xd4, 0xc4, 0x87, 0x77, 0xf2, 0xe7, 0x5d, 0xf7, 0xff, 0x2d, 0xbb, 0xb6, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xc4, 0x87, 0x77, 0xf2, 0xe7, 0x5d, 0xf7, 0xff, 0x2d, 0xbb, 0xb6,
@ -195,8 +195,8 @@ var testnetGenesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x21, 0xfc, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xa9, 0x66, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0x24, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xca, 0x62, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf5, 0x41, 0x4c, 0xf4, 0xa8, 0xa2, 0x8c, 0x47, 0x9d, 0xb5, 0x75, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xf5, 0x41, 0x4c, 0xf4, 0xa8, 0xa2, 0x8c, 0x47, 0x9d, 0xb5, 0x75, 0x5e,
@ -214,8 +214,8 @@ var simnetGenesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x52, 0xde, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0xf3, 0x1d, 0x00, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xd9, 0x39, 0x5f, 0x40, 0x2a, 0x5e, 0x24, 0x09, 0x1b, 0x9a, 0x4b, 0xdf, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x39, 0x5f, 0x40, 0x2a, 0x5e, 0x24, 0x09, 0x1b, 0x9a, 0x4b, 0xdf,
@ -235,8 +235,8 @@ var devnetGenesisBlockBytes = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x56, 0xb0, 0x05, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0xed, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xbb, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x1d, 0x1c, 0x05, 0x21, 0x10, 0x45, 0x61, 0xed, 0xc6, 0x0b, 0xdc, 0x85, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x1c, 0x05, 0x21, 0x10, 0x45, 0x61, 0xed, 0xc6, 0x0b, 0xdc, 0x85,

View File

@ -53,7 +53,6 @@ const (
timestampDeviationTolerance = 132 timestampDeviationTolerance = 132
finalityDuration = 24 * time.Hour finalityDuration = 24 * time.Hour
targetTimePerBlock = 1 * time.Second targetTimePerBlock = 1 * time.Second
finalityInterval = uint64(finalityDuration / targetTimePerBlock)
) )
// ConsensusDeployment defines details related to a specific consensus rule // ConsensusDeployment defines details related to a specific consensus rule
@ -136,8 +135,8 @@ type Params struct {
// block. // block.
TargetTimePerBlock time.Duration TargetTimePerBlock time.Duration
// FinalityInterval is the interval that determines the finality window of the DAG. // FinalityDuration is the duration of the finality window.
FinalityInterval uint64 FinalityDuration time.Duration
// TimestampDeviationTolerance is the maximum offset a block timestamp // TimestampDeviationTolerance is the maximum offset a block timestamp
// is allowed to be in the future before it gets delayed // is allowed to be in the future before it gets delayed
@ -203,7 +202,7 @@ var MainnetParams = Params{
BlockCoinbaseMaturity: 100, BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000, SubsidyReductionInterval: 210000,
TargetTimePerBlock: targetTimePerBlock, TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval, FinalityDuration: finalityDuration,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance, TimestampDeviationTolerance: timestampDeviationTolerance,
@ -216,8 +215,8 @@ var MainnetParams = Params{
Deployments: [DefinedDeployments]ConsensusDeployment{ Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: { DeploymentTestDummy: {
BitNumber: 28, BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC StartTime: 1199145601000, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC ExpireTime: 1230767999000, // December 31, 2008 UTC
}, },
}, },
@ -256,7 +255,7 @@ var RegressionNetParams = Params{
BlockCoinbaseMaturity: 100, BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150, SubsidyReductionInterval: 150,
TargetTimePerBlock: targetTimePerBlock, TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval, FinalityDuration: finalityDuration,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance, TimestampDeviationTolerance: timestampDeviationTolerance,
@ -307,7 +306,7 @@ var TestnetParams = Params{
BlockCoinbaseMaturity: 100, BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000, SubsidyReductionInterval: 210000,
TargetTimePerBlock: targetTimePerBlock, TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval, FinalityDuration: finalityDuration,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance, TimestampDeviationTolerance: timestampDeviationTolerance,
@ -320,8 +319,8 @@ var TestnetParams = Params{
Deployments: [DefinedDeployments]ConsensusDeployment{ Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: { DeploymentTestDummy: {
BitNumber: 28, BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC StartTime: 1199145601000, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC ExpireTime: 1230767999000, // December 31, 2008 UTC
}, },
}, },
@ -363,8 +362,8 @@ var SimnetParams = Params{
PowMax: simnetPowMax, PowMax: simnetPowMax,
BlockCoinbaseMaturity: 100, BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000, SubsidyReductionInterval: 210000,
TargetTimePerBlock: targetTimePerBlock, TargetTimePerBlock: time.Millisecond,
FinalityInterval: finalityInterval, FinalityDuration: time.Minute,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance, TimestampDeviationTolerance: timestampDeviationTolerance,
@ -413,7 +412,7 @@ var DevnetParams = Params{
BlockCoinbaseMaturity: 100, BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000, SubsidyReductionInterval: 210000,
TargetTimePerBlock: targetTimePerBlock, TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval, FinalityDuration: finalityDuration,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance, TimestampDeviationTolerance: timestampDeviationTolerance,
@ -426,8 +425,8 @@ var DevnetParams = Params{
Deployments: [DefinedDeployments]ConsensusDeployment{ Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: { DeploymentTestDummy: {
BitNumber: 28, BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC StartTime: 1199145601000, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC ExpireTime: 1230767999000, // December 31, 2008 UTC
}, },
}, },

View File

@ -1,11 +1,12 @@
package dbaccess package dbaccess
import ( import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"io/ioutil" "io/ioutil"
"reflect" "reflect"
"testing" "testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
) )
func TestBlockStoreSanity(t *testing.T) { func TestBlockStoreSanity(t *testing.T) {
@ -15,13 +16,13 @@ func TestBlockStoreSanity(t *testing.T) {
t.Fatalf("TestBlockStoreSanity: TempDir unexpectedly "+ t.Fatalf("TestBlockStoreSanity: TempDir unexpectedly "+
"failed: %s", err) "failed: %s", err)
} }
err = Open(path) databaseContext, err := New(path)
if err != nil { if err != nil {
t.Fatalf("TestBlockStoreSanity: Open unexpectedly "+ t.Fatalf("TestBlockStoreSanity: Open unexpectedly "+
"failed: %s", err) "failed: %s", err)
} }
defer func() { defer func() {
err := Close() err := databaseContext.Close()
if err != nil { if err != nil {
t.Fatalf("TestBlockStoreSanity: Close unexpectedly "+ t.Fatalf("TestBlockStoreSanity: Close unexpectedly "+
"failed: %s", err) "failed: %s", err)
@ -36,7 +37,7 @@ func TestBlockStoreSanity(t *testing.T) {
t.Fatalf("TestBlockStoreSanity: util.Block.Bytes unexpectedly "+ t.Fatalf("TestBlockStoreSanity: util.Block.Bytes unexpectedly "+
"failed: %s", err) "failed: %s", err)
} }
dbTx, err := NewTx() dbTx, err := databaseContext.NewTx()
if err != nil { if err != nil {
t.Fatalf("Failed to open database "+ t.Fatalf("Failed to open database "+
"transaction: %s", err) "transaction: %s", err)
@ -54,7 +55,7 @@ func TestBlockStoreSanity(t *testing.T) {
} }
// Make sure the genesis block now exists in the db // Make sure the genesis block now exists in the db
exists, err := HasBlock(NoTx(), genesisHash) exists, err := HasBlock(databaseContext, genesisHash)
if err != nil { if err != nil {
t.Fatalf("TestBlockStoreSanity: HasBlock unexpectedly "+ t.Fatalf("TestBlockStoreSanity: HasBlock unexpectedly "+
"failed: %s", err) "failed: %s", err)
@ -66,7 +67,7 @@ func TestBlockStoreSanity(t *testing.T) {
// Fetch the genesis block back from the db and make sure // Fetch the genesis block back from the db and make sure
// that it's equal to the original // that it's equal to the original
fetchedGenesisBytes, err := FetchBlock(NoTx(), genesisHash) fetchedGenesisBytes, err := FetchBlock(databaseContext, genesisHash)
if err != nil { if err != nil {
t.Fatalf("TestBlockStoreSanity: FetchBlock unexpectedly "+ t.Fatalf("TestBlockStoreSanity: FetchBlock unexpectedly "+
"failed: %s", err) "failed: %s", err)

View File

@ -11,17 +11,12 @@ type Context interface {
accessor() (database.DataAccessor, error) accessor() (database.DataAccessor, error)
} }
type noTxContext struct{} type noTxContext struct {
backend *DatabaseContext
var noTxContextSingleton = &noTxContext{}
func (*noTxContext) accessor() (database.DataAccessor, error) {
return db()
} }
// NoTx creates and returns an instance of dbaccess.Context without an attached database transaction func (ctx *noTxContext) accessor() (database.DataAccessor, error) {
func NoTx() Context { return ctx.backend.db, nil
return noTxContextSingleton
} }
// TxContext represents a database context with an attached database transaction // TxContext represents a database context with an attached database transaction
@ -29,6 +24,15 @@ type TxContext struct {
dbTransaction database.Transaction dbTransaction database.Transaction
} }
// NewTx returns an instance of TxContext with a new database transaction
func (ctx *DatabaseContext) NewTx() (*TxContext, error) {
dbTransaction, err := ctx.db.Begin()
if err != nil {
return nil, err
}
return &TxContext{dbTransaction: dbTransaction}, nil
}
func (ctx *TxContext) accessor() (database.DataAccessor, error) { func (ctx *TxContext) accessor() (database.DataAccessor, error) {
return ctx.dbTransaction, nil return ctx.dbTransaction, nil
} }
@ -48,16 +52,3 @@ func (ctx *TxContext) Rollback() error {
func (ctx *TxContext) RollbackUnlessClosed() error { func (ctx *TxContext) RollbackUnlessClosed() error {
return ctx.dbTransaction.RollbackUnlessClosed() return ctx.dbTransaction.RollbackUnlessClosed()
} }
// NewTx returns an instance of TxContext with a new database transaction
func NewTx() (*TxContext, error) {
db, err := db()
if err != nil {
return nil, err
}
dbTransaction, err := db.Begin()
if err != nil {
return nil, err
}
return &TxContext{dbTransaction: dbTransaction}, nil
}

View File

@ -3,41 +3,28 @@ package dbaccess
import ( import (
"github.com/kaspanet/kaspad/database" "github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/database/ffldb" "github.com/kaspanet/kaspad/database/ffldb"
"github.com/pkg/errors"
) )
// dbSingleton is a handle to an instance of the kaspad database // DatabaseContext represents a context in which all database queries run
var dbSingleton database.Database type DatabaseContext struct {
db database.Database
// db returns a handle to the database *noTxContext
func db() (database.Database, error) {
if dbSingleton == nil {
return nil, errors.New("database is not open")
}
return dbSingleton, nil
} }
// Open opens the database for given path // New creates a new DatabaseContext with database is in the specified `path`
func Open(path string) error { func New(path string) (*DatabaseContext, error) {
if dbSingleton != nil {
return errors.New("database is already open")
}
db, err := ffldb.Open(path) db, err := ffldb.Open(path)
if err != nil { if err != nil {
return err return nil, err
} }
dbSingleton = db databaseContext := &DatabaseContext{db: db}
return nil databaseContext.noTxContext = &noTxContext{backend: databaseContext}
return databaseContext, nil
} }
// Close closes the database, if it's open // Close closes the DatabaseContext's connection, if it's open
func Close() error { func (ctx *DatabaseContext) Close() error {
if dbSingleton == nil { return ctx.db.Close()
return nil
}
err := dbSingleton.Close()
dbSingleton = nil
return err
} }

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package connmgr package dnsseed
import ( import (
"github.com/kaspanet/kaspad/logger" "github.com/kaspanet/kaspad/logger"

View File

@ -2,16 +2,17 @@
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package connmgr package dnsseed
import ( import (
"fmt" "fmt"
mrand "math/rand" "math/rand"
"net" "net"
"strconv" "strconv"
"time" "time"
"github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dagconfig"
@ -32,20 +33,19 @@ const (
) )
// OnSeed is the signature of the callback function which is invoked when DNS // OnSeed is the signature of the callback function which is invoked when DNS
// seeding is succesfull. // seeding is successful.
type OnSeed func(addrs []*wire.NetAddress) type OnSeed func(addrs []*wire.NetAddress)
// LookupFunc is the signature of the DNS lookup function. // LookupFunc is the signature of the DNS lookup function.
type LookupFunc func(string) ([]net.IP, error) type LookupFunc func(string) ([]net.IP, error)
// SeedFromDNS uses DNS seeding to populate the address manager with peers. // SeedFromDNS uses DNS seeding to populate the address manager with peers.
func SeedFromDNS(dagParams *dagconfig.Params, reqServices wire.ServiceFlag, includeAllSubnetworks bool, func SeedFromDNS(dagParams *dagconfig.Params, customSeed string, reqServices wire.ServiceFlag, includeAllSubnetworks bool,
subnetworkID *subnetworkid.SubnetworkID, lookupFn LookupFunc, seedFn OnSeed) { subnetworkID *subnetworkid.SubnetworkID, lookupFn LookupFunc, seedFn OnSeed) {
var dnsSeeds []string var dnsSeeds []string
mainConfig := config.ActiveConfig() if customSeed != "" {
if mainConfig != nil && mainConfig.DNSSeed != "" { dnsSeeds = []string{customSeed}
dnsSeeds = []string{mainConfig.DNSSeed}
} else { } else {
dnsSeeds = dagParams.DNSSeeds dnsSeeds = dagParams.DNSSeeds
} }
@ -66,29 +66,29 @@ func SeedFromDNS(dagParams *dagconfig.Params, reqServices wire.ServiceFlag, incl
} }
} }
spawn(func() { spawn("SeedFromDNS", func() {
randSource := mrand.New(mrand.NewSource(time.Now().UnixNano())) randSource := rand.New(rand.NewSource(time.Now().UnixNano()))
seedpeers, err := lookupFn(host) seedPeers, err := lookupFn(host)
if err != nil { if err != nil {
log.Infof("DNS discovery failed on seed %s: %s", host, err) log.Infof("DNS discovery failed on seed %s: %s", host, err)
return return
} }
numPeers := len(seedpeers) numPeers := len(seedPeers)
log.Infof("%d addresses found from DNS seed %s", numPeers, host) log.Infof("%d addresses found from DNS seed %s", numPeers, host)
if numPeers == 0 { if numPeers == 0 {
return return
} }
addresses := make([]*wire.NetAddress, len(seedpeers)) addresses := make([]*wire.NetAddress, len(seedPeers))
// if this errors then we have *real* problems // if this errors then we have *real* problems
intPort, _ := strconv.Atoi(dagParams.DefaultPort) intPort, _ := strconv.Atoi(dagParams.DefaultPort)
for i, peer := range seedpeers { for i, peer := range seedPeers {
addresses[i] = wire.NewNetAddressTimestamp( addresses[i] = wire.NewNetAddressTimestamp(
// seed with addresses from a time randomly selected // seed with addresses from a time randomly selected
// between 3 and 7 days ago. // between 3 and 7 days ago.
time.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+ mstime.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+
randSource.Int31n(secondsIn4Days))), randSource.Int31n(secondsIn4Days))),
0, peer, uint16(intPort)) 0, peer, uint16(intPort))
} }

2
doc.go
View File

@ -6,7 +6,7 @@ Copyright (c) 2013-2014 Conformal Systems LLC.
Use of this source code is governed by an ISC Use of this source code is governed by an ISC
license that can be found in the LICENSE file. license that can be found in the LICENSE file.
kaspad is a full-node kaspa implementation written in Go. Kaspad is a full-node kaspa implementation written in Go.
The default options are sane for most users. This means kaspad will work 'out of The default options are sane for most users. This means kaspad will work 'out of
the box' for most users. However, there are also a wide variety of flags that the box' for most users. However, there are also a wide variety of flags that

4
go.mod
View File

@ -7,7 +7,7 @@ require (
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792
github.com/btcsuite/winsvc v1.0.0 github.com/btcsuite/winsvc v1.0.0
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
github.com/golang/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.4.1
github.com/golang/snappy v0.0.1 // indirect github.com/golang/snappy v0.0.1 // indirect
github.com/jessevdk/go-flags v1.4.0 github.com/jessevdk/go-flags v1.4.0
github.com/jrick/logrotate v1.0.0 github.com/jrick/logrotate v1.0.0
@ -18,6 +18,8 @@ require (
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect
golang.org/x/text v0.3.2 // indirect golang.org/x/text v0.3.2 // indirect
google.golang.org/grpc v1.30.0
google.golang.org/protobuf v1.25.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.2.2 // indirect gopkg.in/yaml.v2 v2.2.2 // indirect
) )

64
go.sum
View File

@ -1,20 +1,44 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
@ -36,6 +60,7 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
@ -45,13 +70,24 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -61,8 +97,34 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20200228224639-71482053b885/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200228224639-71482053b885/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -74,3 +136,5 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -0,0 +1,61 @@
package integration
import (
"fmt"
"sync"
"testing"
"time"
"github.com/kaspanet/kaspad/util/locks"
"github.com/kaspanet/kaspad/wire"
)
// Test64IncomingConnections verifies that a single node (the "victim") can
// serve 64 simultaneous incoming peer connections, and that a block mined on
// the victim is relayed exactly once to each connected peer.
func Test64IncomingConnections(t *testing.T) {
	// Much more than 64 hosts creates a risk of running out of available file descriptors for leveldb
	const numBullies = 64
	harnessesParams := make([]*harnessParams, numBullies+1)
	for i := 0; i < numBullies+1; i++ {
		harnessesParams[i] = &harnessParams{
			p2pAddress:              fmt.Sprintf("127.0.0.1:%d", 12345+i),
			rpcAddress:              fmt.Sprintf("127.0.0.1:%d", 22345+i),
			miningAddress:           miningAddress1,
			miningAddressPrivateKey: miningAddress1PrivateKey,
		}
	}

	appHarnesses, teardown := setupHarnesses(t, harnessesParams)
	defer teardown()

	victim, bullies := appHarnesses[0], appHarnesses[1:]

	for _, bully := range bullies {
		connect(t, victim, bully)
	}

	blockAddedWG := sync.WaitGroup{}
	blockAddedWG.Add(numBullies)
	for _, bully := range bullies {
		err := bully.rpcClient.NotifyBlocks()
		if err != nil {
			t.Fatalf("Error from NotifyBlocks: %+v", err)
		}

		blockAdded := false
		bully.rpcClient.onBlockAdded = func(header *wire.BlockHeader) {
			if blockAdded {
				// This callback runs on the RPC client's notification
				// goroutine, so t.Fatalf (which must run on the test
				// goroutine) is not safe here; t.Errorf is goroutine-safe.
				// Skipping Done() makes the test fail via the timeout below.
				t.Errorf("Single bully reported block added twice")
				return
			}
			blockAdded = true
			blockAddedWG.Done()
		}
	}

	_ = mineNextBlock(t, victim)

	select {
	case <-time.After(defaultTimeout):
		t.Fatalf("Timeout waiting for block added notification from the bullies")
	case <-locks.ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
	}
}

View File

@ -0,0 +1,32 @@
package integration
import (
"testing"
)
// TestAddressExchange checks peer-address propagation: an address added to
// node 1's address manager must eventually be reported by node 3, which is
// only indirectly connected to node 1 through node 2.
func TestAddressExchange(t *testing.T) {
	appHarness1, appHarness2, appHarness3, teardown := standardSetup(t)
	defer teardown()

	const testAddress = "1.2.3.4:6789"
	if err := appHarness1.app.AddressManager().AddAddressByIP(testAddress, nil); err != nil {
		t.Fatalf("Error adding address to addressManager: %+v", err)
	}

	// Chain the nodes so the address has to travel 1 -> 2 -> 3.
	connect(t, appHarness1, appHarness2)
	connect(t, appHarness2, appHarness3)

	peerAddresses, err := appHarness3.rpcClient.GetPeerAddresses()
	if err != nil {
		t.Fatalf("Error getting peer addresses: %+v", err)
	}

	found := false
	for _, peerAddress := range peerAddresses.Addresses {
		if peerAddress.Addr == testAddress {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Didn't find testAddress in list of addresses of appHarness3")
	}
}

View File

@ -0,0 +1,51 @@
package integration
import (
"testing"
"time"
"github.com/kaspanet/kaspad/wire"
)
// TestIntegrationBasicSync mines a block on node 1 and verifies that it
// reaches both a directly-connected node (2) and an indirectly-connected
// node (3).
func TestIntegrationBasicSync(t *testing.T) {
	appHarness1, appHarness2, appHarness3, teardown := standardSetup(t)
	defer teardown()

	// Connect nodes in chain: 1 <--> 2 <--> 3
	// So that node 3 doesn't directly get blocks from node 1
	connect(t, appHarness1, appHarness2)
	connect(t, appHarness2, appHarness3)

	app2OnBlockAddedChan := make(chan *wire.BlockHeader)
	setOnBlockAddedHandler(t, appHarness2, func(header *wire.BlockHeader) {
		app2OnBlockAddedChan <- header
	})
	app3OnBlockAddedChan := make(chan *wire.BlockHeader)
	setOnBlockAddedHandler(t, appHarness3, func(header *wire.BlockHeader) {
		app3OnBlockAddedChan <- header
	})

	block := mineNextBlock(t, appHarness1)

	// expectBlock waits for a header on the given channel and checks it
	// matches the mined block; connectivity only flavors the timeout message.
	expectBlock := func(headerChan chan *wire.BlockHeader, connectivity string) {
		var header *wire.BlockHeader
		select {
		case header = <-headerChan:
		case <-time.After(defaultTimeout):
			t.Fatalf("Timeout waiting for block added notification on node %s connected to miner", connectivity)
		}
		if !header.BlockHash().IsEqual(block.Hash()) {
			t.Errorf("Expected block with hash '%s', but got '%s'", block.Hash(), header.BlockHash())
		}
	}

	expectBlock(app2OnBlockAddedChan, "directly")
	expectBlock(app3OnBlockAddedChan, "indirectly")
}

View File

@ -0,0 +1,65 @@
package integration
import (
"io/ioutil"
"testing"
"time"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
)
const (
	// p2p listen addresses for the three standard test nodes.
	p2pAddress1 = "127.0.0.1:54321"
	p2pAddress2 = "127.0.0.1:54322"
	p2pAddress3 = "127.0.0.1:54323"

	// RPC listen addresses for the three standard test nodes.
	rpcAddress1 = "127.0.0.1:12345"
	rpcAddress2 = "127.0.0.1:12346"
	rpcAddress3 = "127.0.0.1:12347"

	// Credentials used by every test node's RPC server and client.
	rpcUser = "user"
	rpcPass = "pass"

	// Simnet mining addresses, each paired with its private key, used as
	// coinbase payout targets for the test nodes.
	miningAddress1           = "kaspasim:qzmdkk8ay8sgvp8cnwts8gtdylz9j7572slwdh85qv"
	miningAddress1PrivateKey = "be9e9884f03e687166479e22d21b064db7903d69b5a46878aae66521c01a6094"

	miningAddress2           = "kaspasim:qze20hwkc4lzq37jt0hrym5emlsxxs8j3qyf3y4ghs"
	miningAddress2PrivateKey = "98bd8d8e1f7078abefd017839f83edd0e3c8226ed4989e4d7a8bceb5935de193"

	miningAddress3           = "kaspasim:qretklduvhg5h2aj7jd8w4heq7pvtkpv9q6w4sqfen"
	miningAddress3PrivateKey = "eb0af684f2cdbb4ed2d85fbfe0b7f40654a7777fb2c47f142ffb5543b594d1e4"

	// defaultTimeout bounds every wait performed by the integration tests.
	defaultTimeout = 10 * time.Second
)
// setConfig populates the harness's config with the common test
// configuration, a fresh temporary data directory, and the harness's own
// p2p/RPC listen addresses.
func setConfig(t *testing.T, harness *appHarness) {
	cfg := commonConfig()
	cfg.DataDir = randomDirectory(t)
	cfg.Listeners = []string{harness.p2pAddress}
	cfg.RPCListeners = []string{harness.rpcAddress}

	harness.config = cfg
}
// commonConfig returns the configuration shared by all integration-test
// nodes: simnet parameters with a low coinbase maturity, no outbound peers or
// DNS seeding, and plain (non-TLS) RPC with the test credentials.
func commonConfig() *config.Config {
	cfg := config.DefaultConfig()

	// Copy the net params so that we can make changes safely.
	*cfg.ActiveNetParams = dagconfig.SimnetParams
	cfg.ActiveNetParams.BlockCoinbaseMaturity = 10

	cfg.TargetOutboundPeers = 0
	cfg.DisableDNSSeed = true
	cfg.RPCUser = rpcUser
	cfg.RPCPass = rpcPass
	cfg.DisableTLS = true
	cfg.Simnet = true

	return cfg
}
// randomDirectory creates a fresh temporary directory for a test node's data
// and returns its path, failing the test if creation fails.
func randomDirectory(t *testing.T) string {
	path, err := ioutil.TempDir("", "integration-test")
	if err != nil {
		t.Fatalf("Error creating temporary directory for test: %+v", err)
	}
	return path
}

View File

@ -0,0 +1,70 @@
package integration
import (
"testing"
"time"
)
// connect instructs `outgoing` to dial `incoming`'s p2p address, then blocks
// until both sides report the connection or defaultTimeout elapses (which
// fails the test).
func connect(t *testing.T, incoming, outgoing *appHarness) {
	if err := outgoing.rpcClient.ConnectNode(incoming.p2pAddress); err != nil {
		t.Fatalf("Error connecting the nodes")
	}

	onConnectedChan := make(chan struct{})
	abortConnectionChan := make(chan struct{})
	defer close(abortConnectionChan)

	// Poll both nodes every 10ms until each sees the other; signal
	// onConnectedChan on success. The abort channel stops the poller once
	// connect returns (success or timeout).
	spawn("integration.connect-Wait for connection", func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()

		for {
			select {
			case <-abortConnectionChan:
				return
			case <-ticker.C:
				if isConnected(t, incoming, outgoing) {
					close(onConnectedChan)
					return
				}
			}
		}
	})

	select {
	case <-onConnectedChan:
	case <-time.After(defaultTimeout):
		t.Fatalf("Timed out waiting for the apps to connect")
	}
}
// isConnected reports whether appHarness1 and appHarness2 each list the other
// in their connected-peer info.
//
// NOTE(review): this is invoked from the goroutine spawned in connect, where
// t.Fatalf is not strictly safe (Fatal must run on the test goroutine) —
// confirm whether these failure paths should use t.Errorf plus a signal.
func isConnected(t *testing.T, appHarness1, appHarness2 *appHarness) bool {
	connectedPeerInfo1, err := appHarness1.rpcClient.GetConnectedPeerInfo()
	if err != nil {
		t.Fatalf("Error getting connected peer info for app1: %+v", err)
	}
	connectedPeerInfo2, err := appHarness2.rpcClient.GetConnectedPeerInfo()
	if err != nil {
		t.Fatalf("Error getting connected peer info for app2: %+v", err)
	}

	app1ID := appHarness1.app.P2PNodeID().String()
	app2ID := appHarness2.app.P2PNodeID().String()

	// app1 must see app2 as an incoming peer...
	incomingConnected := false
	for _, connectedPeer := range connectedPeerInfo1 {
		if connectedPeer.ID == app2ID {
			incomingConnected = true
			break
		}
	}
	if !incomingConnected {
		return false
	}

	// ...and app2 must see app1 as an outgoing peer.
	for _, connectedPeer := range connectedPeerInfo2 {
		if connectedPeer.ID == app1ID {
			return true
		}
	}
	return false
}

51
integration/ibd_test.go Normal file
View File

@ -0,0 +1,51 @@
package integration
import (
"sync"
"testing"
"time"
"github.com/kaspanet/kaspad/util/locks"
"github.com/kaspanet/kaspad/wire"
)
// TestIBD mines a chain of blocks on one node (the syncer), connects a fresh
// node (the syncee), and verifies the syncee downloads every block and ends
// up with the same selected tip.
func TestIBD(t *testing.T) {
	const numBlocks = 100

	syncer, syncee, _, teardown := standardSetup(t)
	defer teardown()

	for i := 0; i < numBlocks; i++ {
		mineNextBlock(t, syncer)
	}

	blockAddedWG := sync.WaitGroup{}
	blockAddedWG.Add(numBlocks)
	receivedBlocks := 0
	setOnBlockAddedHandler(t, syncee, func(header *wire.BlockHeader) {
		receivedBlocks++
		blockAddedWG.Done()
	})

	connect(t, syncer, syncee)

	select {
	case <-time.After(defaultTimeout):
		t.Fatalf("Timeout waiting for IBD to finish. Received %d blocks out of %d", receivedBlocks, numBlocks)
	case <-locks.ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
	}

	syncerTip, err := syncer.rpcClient.GetSelectedTip()
	if err != nil {
		t.Fatalf("Error getting tip for syncer")
	}
	synceeTip, err := syncee.rpcClient.GetSelectedTip()
	if err != nil {
		t.Fatalf("Error getting tip for syncee")
	}

	if syncerTip.Hash != synceeTip.Hash {
		t.Errorf("Tips of syncer: '%s' and syncee '%s' are not equal", syncerTip.Hash, synceeTip.Hash)
	}
}

View File

@ -1,14 +1,14 @@
// Copyright (c) 2015-2016 The btcsuite developers // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package peer package integration
import ( import (
"github.com/kaspanet/kaspad/logger" "github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics" "github.com/kaspanet/kaspad/util/panics"
) )
var log, _ = logger.Get(logger.SubsystemTags.PEER) var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log) var spawn = panics.GoroutineWrapperFunc(log)
var spawnAfter = panics.AfterFuncWrapperFunc(log)

14
integration/main_test.go Normal file
View File

@ -0,0 +1,14 @@
package integration
import (
"os"
"testing"
"github.com/kaspanet/kaspad/logger"
)
// TestMain configures logging for the whole integration-test package before
// any test runs, then runs the tests and propagates their exit code.
func TestMain(m *testing.M) {
	logger.SetLogLevels("debug")

	os.Exit(m.Run())
}

View File

@ -0,0 +1,48 @@
package integration
import (
"math/rand"
"testing"
clientpkg "github.com/kaspanet/kaspad/rpc/client"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// solveBlock CPU-mines the given block: starting from a random nonce it scans
// (with wraparound) until the block hash meets the header's target
// difficulty, writing each candidate nonce into the header in place. It
// returns the solved wire message, or panics if no nonce in the scanned range
// works (which should never happen on simnet difficulty).
func solveBlock(block *util.Block) *wire.MsgBlock {
	msgBlock := block.MsgBlock()
	targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)

	start := rand.Uint64()
	for nonce := start; nonce != start-1; nonce++ {
		msgBlock.Header.Nonce = nonce
		if daghash.HashToBig(msgBlock.BlockHash()).Cmp(targetDifficulty) <= 0 {
			return msgBlock
		}
	}

	panic("Failed to solve block! This should never happen")
}
// mineNextBlock fetches a block template from the harness's node, solves it,
// submits it back, and returns the resulting block. Any failure fails the
// test.
func mineNextBlock(t *testing.T, harness *appHarness) *util.Block {
	blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress, "")
	if err != nil {
		t.Fatalf("Error getting block template: %+v", err)
	}

	block, err := clientpkg.ConvertGetBlockTemplateResultToBlock(blockTemplate)
	if err != nil {
		t.Fatalf("Error parsing blockTemplate: %s", err)
	}

	// solveBlock writes the winning nonce into the block's header in place,
	// so `block` is already solved when we submit it.
	solveBlock(block)

	if err = harness.rpcClient.SubmitBlock(block, nil); err != nil {
		t.Fatalf("Error submitting block: %s", err)
	}

	return block
}

View File

@ -0,0 +1,15 @@
package integration
import (
"testing"
"github.com/kaspanet/kaspad/wire"
)
// setOnBlockAddedHandler registers handler to be invoked for every block
// added notification from the harness' node.
//
// The handler is assigned BEFORE NotifyBlocks subscribes, so that a
// notification delivered immediately after subscription cannot race with
// (or arrive ahead of) the handler assignment. The original code set the
// handler after subscribing, leaving a window where notifications were
// silently dropped.
func setOnBlockAddedHandler(t *testing.T, harness *appHarness, handler func(header *wire.BlockHeader)) {
	harness.rpcClient.onBlockAdded = handler
	err := harness.rpcClient.NotifyBlocks()
	if err != nil {
		t.Fatalf("Error from NotifyBlocks: %s", err)
	}
}

37
integration/rpc_test.go Normal file
View File

@ -0,0 +1,37 @@
package integration
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
rpcclient "github.com/kaspanet/kaspad/rpc/client"
)
// rpcClient wraps the standard RPC client with a settable callback that
// is invoked whenever the node reports a newly added block.
type rpcClient struct {
	*rpcclient.Client

	// onBlockAdded, when non-nil, is called from the client's
	// OnFilteredBlockAdded notification handler with the new
	// block's header.
	onBlockAdded func(*wire.BlockHeader)
}
// newRPCClient connects a websocket RPC client to rpcAddress and returns
// it wrapped in an rpcClient, whose onBlockAdded callback tests may set
// to observe block-added notifications.
func newRPCClient(rpcAddress string) (*rpcClient, error) {
	client := &rpcClient{}

	handlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader, txs []*util.Tx) {
			// Forward the notification only when a handler was
			// registered via setOnBlockAddedHandler.
			if client.onBlockAdded == nil {
				return
			}
			client.onBlockAdded(header)
		},
	}

	connConfig := &rpcclient.ConnConfig{
		Host:           rpcAddress,
		Endpoint:       "ws",
		User:           rpcUser,
		Pass:           rpcPass,
		DisableTLS:     true,
		RequestTimeout: defaultTimeout,
	}

	var err error
	client.Client, err = rpcclient.New(connConfig, handlers)
	return client, err
}

132
integration/setup_test.go Normal file
View File

@ -0,0 +1,132 @@
package integration
import (
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/app"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dbaccess"
)
// appHarness bundles everything needed to drive one node instance in an
// integration test: the running application, an RPC client connected to
// it, its network addresses, mining credentials, and the config and
// database context it was created with.
type appHarness struct {
	app       *app.App
	rpcClient *rpcClient

	// Listen addresses for the node's P2P and RPC servers.
	p2pAddress string
	rpcAddress string

	// Address that mined coinbases pay to, and the private key that
	// can spend them.
	miningAddress           string
	miningAddressPrivateKey string

	config          *config.Config
	databaseContext *dbaccess.DatabaseContext
}
// harnessParams holds the per-node values used to construct an
// appHarness: the P2P and RPC listen addresses plus the mining address
// and its spending key.
type harnessParams struct {
	p2pAddress              string
	rpcAddress              string
	miningAddress           string
	miningAddressPrivateKey string
}
// setupHarness creates a single appHarness with given parameters
func setupHarness(t *testing.T, params *harnessParams) (harness *appHarness, teardownFunc func()) {
	harness = &appHarness{
		p2pAddress:              params.p2pAddress,
		rpcAddress:              params.rpcAddress,
		miningAddress:           params.miningAddress,
		miningAddressPrivateKey: params.miningAddressPrivateKey,
	}

	// Bring the harness up in dependency order: configuration,
	// database, application, and finally the RPC client that talks
	// to the now-running node.
	setConfig(t, harness)
	setDatabaseContext(t, harness)
	setApp(t, harness)
	harness.app.Start()
	setRPCClient(t, harness)

	teardownFunc = func() {
		teardownHarness(t, harness)
	}
	return harness, teardownFunc
}
// setupHarnesses creates multiple appHarnesses, according to number of parameters passed
func setupHarnesses(t *testing.T, harnessesParams []*harnessParams) (harnesses []*appHarness, teardownFunc func()) {
	teardowns := make([]func(), 0, len(harnessesParams))
	for _, params := range harnessesParams {
		harness, teardown := setupHarness(t, params)
		harnesses = append(harnesses, harness)
		teardowns = append(teardowns, teardown)
	}

	// The combined teardown runs each harness' teardown in turn.
	teardownFunc = func() {
		for _, teardown := range teardowns {
			teardown()
		}
	}
	return harnesses, teardownFunc
}
// standardSetup creates a standard setup of 3 appHarnesses that should work for most tests
func standardSetup(t *testing.T) (appHarness1, appHarness2, appHarness3 *appHarness, teardownFunc func()) {
	params := []*harnessParams{
		{
			p2pAddress:              p2pAddress1,
			rpcAddress:              rpcAddress1,
			miningAddress:           miningAddress1,
			miningAddressPrivateKey: miningAddress1PrivateKey,
		},
		{
			p2pAddress:              p2pAddress2,
			rpcAddress:              rpcAddress2,
			miningAddress:           miningAddress2,
			miningAddressPrivateKey: miningAddress2PrivateKey,
		},
		{
			p2pAddress:              p2pAddress3,
			rpcAddress:              rpcAddress3,
			miningAddress:           miningAddress3,
			miningAddressPrivateKey: miningAddress3PrivateKey,
		},
	}

	harnesses, teardown := setupHarnesses(t, params)
	return harnesses[0], harnesses[1], harnesses[2], teardown
}
// setRPCClient connects an RPC client to the harness' node and stores it
// on the harness, failing the test if the connection cannot be made.
func setRPCClient(t *testing.T, harness *appHarness) {
	client, err := newRPCClient(harness.rpcAddress)
	if err != nil {
		t.Fatalf("Error getting RPC client %+v", err)
	}
	harness.rpcClient = client
}
// teardownHarness stops the harness' application, waits for it to shut
// down, and closes its database. Failures are reported with t.Errorf
// rather than aborting, so the remaining cleanup steps still run.
func teardownHarness(t *testing.T, harness *appHarness) {
	if err := harness.app.Stop(); err != nil {
		t.Errorf("Error stopping App: %+v", err)
	}
	harness.app.WaitForShutdown()

	if err := harness.databaseContext.Close(); err != nil {
		t.Errorf("Error closing database context: %+v", err)
	}
}
// setApp instantiates the application under test from the harness'
// config and database context and stores it on the harness.
func setApp(t *testing.T, harness *appHarness) {
	newApp, err := app.New(harness.config, harness.databaseContext, make(chan struct{}))
	if err != nil {
		t.Fatalf("Error creating app: %+v", err)
	}
	harness.app = newApp
}
// setDatabaseContext opens the harness' database and stores the context
// on the harness, failing the test on error.
//
// Fix: corrected the typo "openning" -> "opening" in the error message.
func setDatabaseContext(t *testing.T, harness *appHarness) {
	databaseContext, err := openDB(harness.config)
	if err != nil {
		t.Fatalf("Error opening database: %+v", err)
	}
	harness.databaseContext = databaseContext
}
// openDB opens (creating if necessary) the node database located in the
// "db" subdirectory of the configured data directory.
func openDB(cfg *config.Config) (*dbaccess.DatabaseContext, error) {
	return dbaccess.New(filepath.Join(cfg.DataDir, "db"))
}

View File

@ -0,0 +1,116 @@
package integration
import (
"encoding/hex"
"strings"
"testing"
"time"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// TestTxRelay verifies that a transaction submitted to one node
// propagates through an intermediate node to a third node's mempool.
//
// Fixes relative to the original:
//   - t.Fatalf was called from the spawned polling goroutine; per the
//     testing package, Fatal/FailNow must only run on the test
//     goroutine. Failures are now delivered over errChan instead.
//   - After close(txAddedToMempoolChan) the polling loop kept running
//     and would close the channel again on the next tick, panicking.
//     The goroutine now returns after signaling success.
//   - The polling goroutine leaked on timeout; a done channel now stops
//     it when the test returns.
func TestTxRelay(t *testing.T) {
	payer, mediator, payee, teardown := standardSetup(t)
	defer teardown()

	// Connect nodes in chain: payer <--> mediator <--> payee
	// So that payee doesn't directly get transactions from payer
	connect(t, payer, mediator)
	connect(t, mediator, payee)

	payeeBlockAddedChan := make(chan *wire.BlockHeader)
	setOnBlockAddedHandler(t, payee, func(header *wire.BlockHeader) {
		payeeBlockAddedChan <- header
	})

	// skip the first block because it's paying to genesis script
	mineNextBlock(t, payer)
	waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)

	// use the second block to get money to pay with
	secondBlock := mineNextBlock(t, payer)
	waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)

	// Mine BlockCoinbaseMaturity more blocks for our money to mature
	for i := uint64(0); i < payer.config.ActiveNetParams.BlockCoinbaseMaturity; i++ {
		mineNextBlock(t, payer)
		waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)
	}

	tx := generateTx(t, secondBlock.CoinbaseTransaction().MsgTx(), payer, payee)
	txID, err := payer.rpcClient.SendRawTransaction(tx, true)
	if err != nil {
		t.Fatalf("Error submitting transaction: %+v", err)
	}

	// Poll the payee's mempool until the transaction shows up.
	txAddedToMempoolChan := make(chan struct{})
	errChan := make(chan error, 1)
	done := make(chan struct{})
	defer close(done) // stop the poller when the test returns
	spawn("TestTxRelay-WaitForTransactionPropagation", func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
			}

			_, err := payee.rpcClient.GetMempoolEntry(txID.String())
			if err != nil {
				if strings.Contains(err.Error(), "-32603: transaction is not in the pool") {
					continue
				}
				errChan <- err
				return
			}
			close(txAddedToMempoolChan)
			return
		}
	})

	select {
	case <-txAddedToMempoolChan:
	case err := <-errChan:
		t.Fatalf("Error getting mempool entry: %+v", err)
	case <-time.After(defaultTimeout):
		t.Fatalf("Timeout waiting for transaction to be accepted into mempool")
	}
}
// waitForPayeeToReceiveBlock blocks until a block-added notification
// arrives on payeeBlockAddedChan, failing the test after defaultTimeout.
func waitForPayeeToReceiveBlock(t *testing.T, payeeBlockAddedChan chan *wire.BlockHeader) {
	timeout := time.After(defaultTimeout)
	select {
	case <-payeeBlockAddedChan:
	case <-timeout:
		t.Fatalf("Timeout waiting for block added")
	}
}
// generateTx builds and signs a transaction that spends the first output
// of firstBlockCoinbase, paying its value minus a 1-unit fee to the
// payee's mining address. The payer's mining private key signs the input.
func generateTx(t *testing.T, firstBlockCoinbase *wire.MsgTx, payer, payee *appHarness) *wire.MsgTx {
	input := wire.NewTxIn(wire.NewOutpoint(firstBlockCoinbase.TxID(), 0), []byte{})

	payeeAddress, err := util.DecodeAddress(payee.miningAddress, util.Bech32PrefixKaspaSim)
	if err != nil {
		t.Fatalf("Error decoding payeeAddress: %+v", err)
	}
	toScript, err := txscript.PayToAddrScript(payeeAddress)
	if err != nil {
		t.Fatalf("Error generating script: %+v", err)
	}

	// Spend everything but a 1-unit fee.
	output := wire.NewTxOut(firstBlockCoinbase.TxOut[0].Value-1, toScript)

	tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{input}, []*wire.TxOut{output})

	privateKeyBytes, err := hex.DecodeString(payer.miningAddressPrivateKey)
	if err != nil {
		t.Fatalf("Error decoding private key: %+v", err)
	}
	privateKey, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
	if err != nil {
		t.Fatalf("Error deserializing private key: %+v", err)
	}

	// Sign against the script of the output being spent.
	fromScript := firstBlockCoinbase.TxOut[0].ScriptPubKey
	signatureScript, err := txscript.SignatureScript(tx, 0, fromScript, txscript.SigHashAll, privateKey, true)
	if err != nil {
		t.Fatalf("Error signing transaction: %+v", err)
	}
	tx.TxIn[0].SignatureScript = signatureScript

	return tx
}

Some files were not shown because too many files have changed in this diff Show More