Merge remote-tracking branch 'origin/v0.5.0-dev'

Mike Zak 2020-07-01 15:05:02 +03:00
commit c62bdb2fa1
161 changed files with 7499 additions and 3973 deletions

View File

@ -5,16 +5,16 @@
package addrmgr
import (
"bytes"
"container/list"
crand "crypto/rand" // for seeding
"encoding/binary"
"encoding/json"
"encoding/gob"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"io"
"math/rand"
"net"
"os"
"path/filepath"
"strconv"
"sync"
"sync/atomic"
@ -26,14 +26,13 @@ import (
"github.com/kaspanet/kaspad/wire"
)
type newBucket [newBucketCount]map[string]*KnownAddress
type triedBucket [triedBucketCount]*list.List
type newBucket [NewBucketCount]map[string]*KnownAddress
type triedBucket [TriedBucketCount]*list.List
// AddrManager provides a concurrency safe address manager for caching potential
// peers on the Kaspa network.
type AddrManager struct {
mtx sync.Mutex
peersFile string
lookupFunc func(string) ([]net.IP, error)
rand *rand.Rand
key [32]byte
@ -66,10 +65,12 @@ type serializedKnownAddress struct {
// no refcount or tried, that is available from context.
}
type serializedNewBucket [newBucketCount][]string
type serializedTriedBucket [triedBucketCount][]string
type serializedNewBucket [NewBucketCount][]string
type serializedTriedBucket [TriedBucketCount][]string
type serializedAddrManager struct {
// PeersStateForSerialization is the data model that is used to
// serialize the peers state to any encoding.
type PeersStateForSerialization struct {
Version int
Key [32]byte
Addresses []*serializedKnownAddress
@ -118,17 +119,17 @@ const (
// tried address bucket.
triedBucketSize = 256
// triedBucketCount is the number of buckets we split tried
// TriedBucketCount is the number of buckets we split tried
// addresses over.
triedBucketCount = 64
TriedBucketCount = 64
// newBucketSize is the maximum number of addresses in each new address
// bucket.
newBucketSize = 64
// newBucketCount is the number of buckets that we spread new addresses
// NewBucketCount is the number of buckets that we spread new addresses
// over.
newBucketCount = 1024
NewBucketCount = 1024
// triedBucketsPerGroup is the number of tried buckets over which an
// address group will be spread.
@ -162,17 +163,17 @@ const (
// to a getAddr. If we have less than this amount, we send everything.
getAddrMin = 50
// getAddrMax is the most addresses that we will send in response
// GetAddrMax is the most addresses that we will send in response
// to a getAddr (in practice the most addresses we will return from a
// call to AddressCache()).
getAddrMax = 2500
GetAddrMax = 2500
// getAddrPercent is the percentage of total addresses known that we
// will share with a call to AddressCache.
getAddrPercent = 23
// serialisationVersion is the current version of the on-disk format.
serialisationVersion = 1
// serializationVersion is the current version of the on-disk format.
serializationVersion = 1
)
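To see how these constants interact in AddressCache (further down in this file): with 10,000 known addresses, 23% is 2,300, which is under the GetAddrMax cap; with 20,000 it would be 4,600, capped to 2,500; and with fewer than getAddrMin (50) addresses everything is sent. A minimal sketch of that selection rule, with the constants inlined:

package main

import "fmt"

const (
	getAddrMin     = 50
	getAddrPercent = 23
	getAddrMax     = 2500 // exported as GetAddrMax in this change
)

// numAddressesToShare mirrors the selection rule in AddressCache below.
func numAddressesToShare(totalKnown int) int {
	n := totalKnown * getAddrPercent / 100
	if n > getAddrMax {
		n = getAddrMax
	}
	if totalKnown < getAddrMin {
		n = totalKnown
	}
	return n
}

func main() {
	fmt.Println(numAddressesToShare(10000)) // 2300: 23% is under the cap
	fmt.Println(numAddressesToShare(20000)) // 2500: capped at GetAddrMax
	fmt.Println(numAddressesToShare(40))    // 40: below getAddrMin, send everything
}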
// updateAddress is a helper function to either update an address already known
@ -392,7 +393,7 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
data2 = append(data2, hashbuf[:]...)
hash2 := daghash.DoubleHashB(data2)
return int(binary.LittleEndian.Uint64(hash2) % newBucketCount)
return int(binary.LittleEndian.Uint64(hash2) % NewBucketCount)
}
func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
@ -411,7 +412,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
data2 = append(data2, hashbuf[:]...)
hash2 := daghash.DoubleHashB(data2)
return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount)
return int(binary.LittleEndian.Uint64(hash2) % TriedBucketCount)
}
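Both bucket selectors above use the same technique: a first double hash binds the address to this node's random key, and a second hash mixes in the source group so a single source cannot deterministically fill every bucket. Below is a self-contained sketch of the new-bucket computation; it substitutes plain double-SHA256 for daghash.DoubleHashB, and the per-group fanout constant is an assumption, since the real value sits outside this hunk:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	newBucketCount     = 1024 // NewBucketCount after this change
	newBucketsPerGroup = 64   // assumed fanout; the real constant is outside this hunk
)

// doubleHash stands in for daghash.DoubleHashB: SHA256(SHA256(b)).
func doubleHash(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// newBucketIndex sketches the two-step keyed hashing: hash1 binds the
// address to the manager's random key, hash2 mixes in the source group.
func newBucketIndex(key [32]byte, addrKey, srcGroup string) int {
	data1 := append(append([]byte{}, key[:]...), addrKey...)
	hash64 := binary.LittleEndian.Uint64(doubleHash(data1)) % newBucketsPerGroup

	data2 := append(append([]byte{}, key[:]...), srcGroup...)
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], hash64)
	data2 = append(data2, buf[:]...)
	return int(binary.LittleEndian.Uint64(doubleHash(data2)) % newBucketCount)
}

func main() {
	var key [32]byte // the real key is randomized per AddrManager
	fmt.Println(newBucketIndex(key, "203.0.113.5:16111", "203.0.113.0/24"))
}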
// addressHandler is the main handler for the address manager. It must be run
@ -423,30 +424,62 @@ out:
for {
select {
case <-dumpAddressTicker.C:
a.savePeers()
err := a.savePeers()
if err != nil {
panic(errors.Wrap(err, "error saving peers"))
}
case <-a.quit:
break out
}
}
a.savePeers()
err := a.savePeers()
if err != nil {
panic(errors.Wrap(err, "error saving peers"))
}
a.wg.Done()
log.Trace("Address handler done")
}
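addressHandler above is the standard ticker-plus-quit loop: periodic saves on the ticker, one final save after quit so nothing recently learned is lost, then the WaitGroup is signaled. A stripped-down sketch with hypothetical names and interval; note that, as in this change, a failed save escalates to a panic:

package main

import (
	"sync"
	"time"
)

// periodicSaver is a generic sketch of the addressHandler loop: save on a
// ticker, save once more on quit, then mark the WaitGroup done.
func periodicSaver(save func() error, quit <-chan struct{}, wg *sync.WaitGroup) {
	ticker := time.NewTicker(10 * time.Minute) // dumpAddressInterval is assumed
	defer ticker.Stop()
out:
	for {
		select {
		case <-ticker.C:
			if err := save(); err != nil {
				panic(err) // this change escalates save failures to a panic
			}
		case <-quit:
			break out
		}
	}
	// Final save on shutdown so recently learned addresses are not lost.
	if err := save(); err != nil {
		panic(err)
	}
	wg.Done()
}

func main() {
	var wg sync.WaitGroup
	quit := make(chan struct{})
	wg.Add(1)
	go periodicSaver(func() error { return nil }, quit, &wg)
	close(quit)
	wg.Wait()
}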
// savePeers saves all the known addresses to a file so they can be read back
// savePeers saves all the known addresses to the database so they can be read back
// in at next run.
func (a *AddrManager) savePeers() {
func (a *AddrManager) savePeers() error {
serializedPeersState, err := a.serializePeersState()
if err != nil {
return err
}
return dbaccess.StorePeersState(dbaccess.NoTx(), serializedPeersState)
}
func (a *AddrManager) serializePeersState() ([]byte, error) {
peersState, err := a.PeersStateForSerialization()
if err != nil {
return nil, err
}
w := &bytes.Buffer{}
encoder := gob.NewEncoder(w)
err = encoder.Encode(&peersState)
if err != nil {
return nil, errors.Wrap(err, "failed to encode peers state")
}
return w.Bytes(), nil
}
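With this change the peers state is gob-encoded into a blob stored via dbaccess rather than written out as a JSON file. A trimmed round-trip illustration, using a cut-down stand-in struct rather than the full PeersStateForSerialization:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// reducedPeersState is a cut-down stand-in for PeersStateForSerialization.
type reducedPeersState struct {
	Version   int
	Key       [32]byte
	Addresses []string
}

func main() {
	original := reducedPeersState{Version: 1, Addresses: []string{"203.0.113.5:16111"}}

	// Encode, as serializePeersState does.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&original); err != nil {
		panic(err)
	}

	// Decode, as deserializePeersState does.
	var decoded reducedPeersState
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Printf("version=%d addresses=%v\n", decoded.Version, decoded.Addresses)
}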
// PeersStateForSerialization returns the data model that is used to serialize the peers state to any encoding.
func (a *AddrManager) PeersStateForSerialization() (*PeersStateForSerialization, error) {
a.mtx.Lock()
defer a.mtx.Unlock()
// First we make a serialisable datastructure so we can encode it to
// json.
sam := new(serializedAddrManager)
sam.Version = serialisationVersion
copy(sam.Key[:], a.key[:])
// First we make a serializable data structure so we can encode it to
// gob.
peersState := new(PeersStateForSerialization)
peersState.Version = serializationVersion
copy(peersState.Key[:], a.key[:])
sam.Addresses = make([]*serializedKnownAddress, len(a.addrIndex))
peersState.Addresses = make([]*serializedKnownAddress, len(a.addrIndex))
i := 0
for k, v := range a.addrIndex {
ska := new(serializedKnownAddress)
@ -463,119 +496,104 @@ func (a *AddrManager) savePeers() {
ska.LastSuccess = v.lastsuccess.Unix()
// Tried and refs are implicit in the rest of the structure
// and will be worked out from context on deserialization.
sam.Addresses[i] = ska
peersState.Addresses[i] = ska
i++
}
sam.NewBuckets = make(map[string]*serializedNewBucket)
peersState.NewBuckets = make(map[string]*serializedNewBucket)
for subnetworkID := range a.addrNew {
subnetworkIDStr := subnetworkID.String()
sam.NewBuckets[subnetworkIDStr] = &serializedNewBucket{}
peersState.NewBuckets[subnetworkIDStr] = &serializedNewBucket{}
for i := range a.addrNew[subnetworkID] {
sam.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i]))
peersState.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i]))
j := 0
for k := range a.addrNew[subnetworkID][i] {
sam.NewBuckets[subnetworkIDStr][i][j] = k
peersState.NewBuckets[subnetworkIDStr][i][j] = k
j++
}
}
}
for i := range a.addrNewFullNodes {
sam.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i]))
peersState.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i]))
j := 0
for k := range a.addrNewFullNodes[i] {
sam.NewBucketFullNodes[i][j] = k
peersState.NewBucketFullNodes[i][j] = k
j++
}
}
sam.TriedBuckets = make(map[string]*serializedTriedBucket)
peersState.TriedBuckets = make(map[string]*serializedTriedBucket)
for subnetworkID := range a.addrTried {
subnetworkIDStr := subnetworkID.String()
sam.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{}
peersState.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{}
for i := range a.addrTried[subnetworkID] {
sam.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len())
peersState.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len())
j := 0
for e := a.addrTried[subnetworkID][i].Front(); e != nil; e = e.Next() {
ka := e.Value.(*KnownAddress)
sam.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na)
peersState.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na)
j++
}
}
}
for i := range a.addrTriedFullNodes {
sam.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len())
peersState.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len())
j := 0
for e := a.addrTriedFullNodes[i].Front(); e != nil; e = e.Next() {
ka := e.Value.(*KnownAddress)
sam.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na)
peersState.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na)
j++
}
}
w, err := os.Create(a.peersFile)
if err != nil {
log.Errorf("Error opening file %s: %s", a.peersFile, err)
return
}
enc := json.NewEncoder(w)
defer w.Close()
if err := enc.Encode(&sam); err != nil {
log.Errorf("Failed to encode file %s: %s", a.peersFile, err)
return
}
return peersState, nil
}
// loadPeers loads the known address from the saved file. If empty, missing, or
// malformed file, just don't load anything and start fresh
func (a *AddrManager) loadPeers() {
// loadPeers loads the known addresses from the database. If missing,
// just don't load anything and start fresh.
func (a *AddrManager) loadPeers() error {
a.mtx.Lock()
defer a.mtx.Unlock()
err := a.deserializePeers(a.peersFile)
if err != nil {
log.Errorf("Failed to parse file %s: %s", a.peersFile, err)
// if it is invalid we nuke the old one unconditionally.
err = os.Remove(a.peersFile)
if err != nil {
log.Warnf("Failed to remove corrupt peers file %s: %s",
a.peersFile, err)
}
serializedPeerState, err := dbaccess.FetchPeersState(dbaccess.NoTx())
if dbaccess.IsNotFoundError(err) {
a.reset()
return
}
log.Infof("Loaded %d addresses from file '%s'", a.totalNumAddresses(), a.peersFile)
}
func (a *AddrManager) deserializePeers(filePath string) error {
_, err := os.Stat(filePath)
if os.IsNotExist(err) {
log.Info("No peers state was found in the database. Created a new one", a.totalNumAddresses())
return nil
}
r, err := os.Open(filePath)
if err != nil {
return errors.Errorf("%s error opening file: %s", filePath, err)
}
defer r.Close()
var sam serializedAddrManager
dec := json.NewDecoder(r)
err = dec.Decode(&sam)
if err != nil {
return errors.Errorf("error reading %s: %s", filePath, err)
return err
}
if sam.Version != serialisationVersion {
err = a.deserializePeersState(serializedPeerState)
if err != nil {
return err
}
log.Infof("Loaded %d addresses from database", a.totalNumAddresses())
return nil
}
func (a *AddrManager) deserializePeersState(serializedPeerState []byte) error {
var peersState PeersStateForSerialization
r := bytes.NewBuffer(serializedPeerState)
dec := gob.NewDecoder(r)
err := dec.Decode(&peersState)
if err != nil {
return errors.Wrap(err, "error deserializing peers state")
}
if peersState.Version != serializationVersion {
return errors.Errorf("unknown version %d in serialized "+
"addrmanager", sam.Version)
"peers state", peersState.Version)
}
copy(a.key[:], sam.Key[:])
copy(a.key[:], peersState.Key[:])
for _, v := range sam.Addresses {
for _, v := range peersState.Addresses {
ka := new(KnownAddress)
ka.na, err = a.DeserializeNetAddress(v.Addr)
if err != nil {
@ -600,12 +618,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
a.addrIndex[NetAddressKey(ka.na)] = ka
}
for subnetworkIDStr := range sam.NewBuckets {
for subnetworkIDStr := range peersState.NewBuckets {
subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr)
if err != nil {
return err
}
for i, subnetworkNewBucket := range sam.NewBuckets[subnetworkIDStr] {
for i, subnetworkNewBucket := range peersState.NewBuckets[subnetworkIDStr] {
for _, val := range subnetworkNewBucket {
ka, ok := a.addrIndex[val]
if !ok {
@ -622,7 +640,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
}
}
for i, newBucket := range sam.NewBucketFullNodes {
for i, newBucket := range peersState.NewBucketFullNodes {
for _, val := range newBucket {
ka, ok := a.addrIndex[val]
if !ok {
@ -638,12 +656,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
}
}
for subnetworkIDStr := range sam.TriedBuckets {
for subnetworkIDStr := range peersState.TriedBuckets {
subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr)
if err != nil {
return err
}
for i, subnetworkTriedBucket := range sam.TriedBuckets[subnetworkIDStr] {
for i, subnetworkTriedBucket := range peersState.TriedBuckets[subnetworkIDStr] {
for _, val := range subnetworkTriedBucket {
ka, ok := a.addrIndex[val]
if !ok {
@ -658,7 +676,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
}
}
for i, triedBucket := range sam.TriedBucketFullNodes {
for i, triedBucket := range peersState.TriedBucketFullNodes {
for _, val := range triedBucket {
ka, ok := a.addrIndex[val]
if !ok {
@ -704,20 +722,24 @@ func (a *AddrManager) DeserializeNetAddress(addr string) (*wire.NetAddress, erro
// Start begins the core address handler which manages a pool of known
// addresses, timeouts, and interval based writes.
func (a *AddrManager) Start() {
func (a *AddrManager) Start() error {
// Already started?
if atomic.AddInt32(&a.started, 1) != 1 {
return
return nil
}
log.Trace("Starting address manager")
// Load peers we already know about from file.
a.loadPeers()
// Load peers we already know about from the database.
err := a.loadPeers()
if err != nil {
return err
}
// Start the address ticker to save addresses periodically.
a.wg.Add(1)
spawn(a.addressHandler)
return nil
}
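The early return in Start relies on atomic.AddInt32 as a start-once guard: only the first caller sees the counter become 1, so repeated calls are harmless no-ops. A minimal standalone sketch of the same guard:

package main

import (
	"fmt"
	"sync/atomic"
)

type service struct {
	started int32
}

// Start performs its startup work exactly once; later calls are no-ops.
func (s *service) Start() error {
	if atomic.AddInt32(&s.started, 1) != 1 {
		return nil // already started
	}
	fmt.Println("starting")
	return nil
}

func main() {
	s := &service{}
	_ = s.Start() // prints "starting"
	_ = s.Start() // no-op
}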
// Stop gracefully shuts down the address manager by stopping the main handler.
@ -839,8 +861,8 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
}
numAddresses := len(allAddr) * getAddrPercent / 100
if numAddresses > getAddrMax {
numAddresses = getAddrMax
if numAddresses > GetAddrMax {
numAddresses = GetAddrMax
}
if len(allAddr) < getAddrMin {
numAddresses = len(allAddr)
@ -1333,9 +1355,8 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
// New returns a new Kaspa address manager.
// Use Start to begin processing asynchronous address updates.
func New(dataDir string, lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager {
func New(lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager {
am := AddrManager{
peersFile: filepath.Join(dataDir, "peers.json"),
lookupFunc: lookupFunc,
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
quit: make(chan struct{}),

View File

@ -8,7 +8,9 @@ import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"io/ioutil"
"net"
"reflect"
"testing"
@ -101,14 +103,41 @@ func addNaTest(ip string, port uint16, want string) {
naTests = append(naTests, test)
}
func lookupFunc(host string) ([]net.IP, error) {
func lookupFuncForTest(host string) ([]net.IP, error) {
return nil, errors.New("not implemented")
}
func newAddrManagerForTest(t *testing.T, testName string,
localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddrManager, teardown func()) {
dbPath, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
err = dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
addressManager = New(lookupFuncForTest, localSubnetworkID)
return addressManager, func() {
err := dbaccess.Close()
if err != nil {
t.Fatalf("error closing the database: %s", err)
}
}
}
func TestStartStop(t *testing.T) {
n := New("teststartstop", lookupFunc, nil)
n.Start()
err := n.Stop()
amgr, teardown := newAddrManagerForTest(t, "TestStartStop", nil)
defer teardown()
err := amgr.Start()
if err != nil {
t.Fatalf("Address Manager failed to start: %v", err)
}
err = amgr.Stop()
if err != nil {
t.Fatalf("Address Manager failed to stop: %v", err)
}
@ -148,7 +177,8 @@ func TestAddAddressByIP(t *testing.T) {
},
}
amgr := New("testaddressbyip", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
defer teardown()
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
if test.err != nil && err == nil {
@ -213,7 +243,8 @@ func TestAddLocalAddress(t *testing.T) {
true,
},
}
amgr := New("testaddlocaladdress", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAddLocalAddress", nil)
defer teardown()
for x, test := range tests {
result := amgr.AddLocalAddress(&test.address, test.priority)
if result == nil && !test.valid {
@ -239,21 +270,22 @@ func TestAttempt(t *testing.T) {
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testattempt", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil)
defer teardown()
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
err := amgr.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()
if !ka.LastAttempt().IsZero() {
t.Errorf("Address should not have attempts, but does")
}
na := ka.NetAddress()
n.Attempt(na)
amgr.Attempt(na)
if ka.LastAttempt().IsZero() {
t.Errorf("Address should have an attempt, but does not")
@ -270,19 +302,20 @@ func TestConnected(t *testing.T) {
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testconnected", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil)
defer teardown()
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
err := amgr.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()
na := ka.NetAddress()
// make it an hour ago
na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0)
n.Connected(na)
amgr.Connected(na)
if !ka.NetAddress().Timestamp.After(na.Timestamp) {
t.Errorf("Address should have a new timestamp, but does not")
@ -299,9 +332,10 @@ func TestNeedMoreAddresses(t *testing.T) {
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testneedmoreaddresses", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil)
defer teardown()
addrsToAdd := 1500
b := n.NeedMoreAddresses()
b := amgr.NeedMoreAddresses()
if !b {
t.Errorf("Expected that we need more addresses")
}
@ -310,7 +344,7 @@ func TestNeedMoreAddresses(t *testing.T) {
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)
addrs[i], err = n.DeserializeNetAddress(s)
addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
@ -318,13 +352,13 @@ func TestNeedMoreAddresses(t *testing.T) {
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
numAddrs := n.TotalNumAddresses()
amgr.AddAddresses(addrs, srcAddr, nil)
numAddrs := amgr.TotalNumAddresses()
if numAddrs > addrsToAdd {
t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
}
b = n.NeedMoreAddresses()
b = amgr.NeedMoreAddresses()
if b {
t.Errorf("Expected that we don't need more addresses")
}
@ -340,7 +374,8 @@ func TestGood(t *testing.T) {
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testgood", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestGood", nil)
defer teardown()
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
subnetworkCount := 32
@ -349,7 +384,7 @@ func TestGood(t *testing.T) {
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)
addrs[i], err = n.DeserializeNetAddress(s)
addrs[i], err = amgr.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
@ -361,24 +396,24 @@ func TestGood(t *testing.T) {
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
amgr.AddAddresses(addrs, srcAddr, nil)
for i, addr := range addrs {
n.Good(addr, subnetworkIDs[i%subnetworkCount])
amgr.Good(addr, subnetworkIDs[i%subnetworkCount])
}
numAddrs := n.TotalNumAddresses()
numAddrs := amgr.TotalNumAddresses()
if numAddrs >= addrsToAdd {
t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
}
numCache := len(n.AddressCache(true, nil))
numCache := len(amgr.AddressCache(true, nil))
if numCache == 0 || numCache >= numAddrs/4 {
t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
numCache, numAddrs/4)
}
for i := 0; i < subnetworkCount; i++ {
numCache = len(n.AddressCache(false, subnetworkIDs[i]))
numCache = len(amgr.AddressCache(false, subnetworkIDs[i]))
if numCache == 0 || numCache >= numAddrs/subnetworkCount {
t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
numCache, numAddrs/4/subnetworkCount)
@ -396,17 +431,18 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil)
defer teardown()
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
oldSubnetwork := subnetworkid.SubnetworkIDNative
n.AddAddress(addr, srcAddr, oldSubnetwork)
n.Good(addr, oldSubnetwork)
amgr.AddAddress(addr, srcAddr, oldSubnetwork)
amgr.Good(addr, oldSubnetwork)
// make sure address was saved to addrIndex under oldSubnetwork
ka := n.find(addr)
ka := amgr.find(addr)
if ka == nil {
t.Fatalf("Address was not found after first time .Good called")
}
@ -415,7 +451,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
// make sure address was added to correct bucket under oldSubnetwork
bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
bucket := amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)]
wasFound := false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
@ -428,10 +464,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
// now call .Good again with a different subnetwork
newSubnetwork := subnetworkid.SubnetworkIDRegistry
n.Good(addr, newSubnetwork)
amgr.Good(addr, newSubnetwork)
// make sure address was updated in addrIndex under newSubnetwork
ka = n.find(addr)
ka = amgr.find(addr)
if ka == nil {
t.Fatalf("Address was not found after second time .Good called")
}
@ -440,7 +476,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
// make sure address was removed from bucket under oldSubnetwork
bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
bucket = amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
@ -452,7 +488,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
// make sure address was added to correct bucket under newSubnetwork
bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
bucket = amgr.addrTried[*newSubnetwork][amgr.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
@ -475,34 +511,35 @@ func TestGetAddress(t *testing.T) {
defer config.SetActiveConfig(originalActiveCfg)
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID)
defer teardown()
// Get an address from an empty set (should error)
if rv := n.GetAddress(); rv != nil {
if rv := amgr.GetAddress(); rv != nil {
t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
}
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
ka := amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
n.Attempt(ka.NetAddress())
amgr.Attempt(ka.NetAddress())
// Checks that we don't get it if we find that it has other subnetwork ID than expected.
actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
n.Good(ka.NetAddress(), actualSubnetworkID)
ka = n.GetAddress()
amgr.Good(ka.NetAddress(), actualSubnetworkID)
ka = amgr.GetAddress()
if ka != nil {
t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
}
// Checks that the total number of addresses was incremented although the new address is not a full node or a partial node of the same subnetwork as the local node.
numAddrs := n.TotalNumAddresses()
numAddrs := amgr.TotalNumAddresses()
if numAddrs != 1 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
@ -510,11 +547,11 @@ func TestGetAddress(t *testing.T) {
// Now we repeat the same process, but now the address has the expected subnetwork ID.
// Add a new address and get it
err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka = n.GetAddress()
ka = amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
@ -524,11 +561,11 @@ func TestGetAddress(t *testing.T) {
if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
}
n.Attempt(ka.NetAddress())
amgr.Attempt(ka.NetAddress())
// Mark this as a good address and get it
n.Good(ka.NetAddress(), localSubnetworkID)
ka = n.GetAddress()
amgr.Good(ka.NetAddress(), localSubnetworkID)
ka = amgr.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
@ -539,7 +576,7 @@ func TestGetAddress(t *testing.T) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
}
numAddrs = n.TotalNumAddresses()
numAddrs = amgr.TotalNumAddresses()
if numAddrs != 2 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
@ -604,7 +641,8 @@ func TestGetBestLocalAddress(t *testing.T) {
*/
}
amgr := New("testgetbestlocaladdress", nil, nil)
amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
defer teardown()
// Test against default when there's no address
for x, test := range tests {

View File

@ -105,8 +105,6 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
}
}
block.SetBlueScore(newNode.blueScore)
// Connect the passed block to the DAG. This also handles validation of the
// transaction scripts.
chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
@ -133,17 +131,17 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
return nil
}
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) {
header := block.MsgBlock().Header
parentHashes := header.ParentHashes
nodes := newBlockSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
node, ok := dag.index.LookupNode(parentHash)
if !ok {
str := fmt.Sprintf("parent block %s is unknown", parentHash)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
} else if dag.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}

View File

@ -63,7 +63,10 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
if isOrphan {
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
}
blockNode1 := dag.index.LookupNode(block1.Hash())
blockNode1, ok := dag.index.LookupNode(block1.Hash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", block1.Hash())
}
dag.index.SetStatusFlags(blockNode1, statusValidateFailed)
block2 := blocks[2]

View File

@ -11,14 +11,14 @@ import (
func TestBlockHeap(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
DAGParams: &dagconfig.MainnetParams,
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
block0Header := dagconfig.MainnetParams.GenesisBlock.Header
block0Header := dagconfig.SimnetParams.GenesisBlock.Header
block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
block100000Header := Block100000.Header

View File

@ -50,11 +50,11 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
// return nil and false if there is no entry for the hash.
//
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
bi.RLock()
defer bi.RUnlock()
node := bi.index[*hash]
return node
node, ok := bi.index[*hash]
return node, ok
}
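This comma-ok signature is the thread running through much of this commit: callers can no longer forget the missing-node case, because the compiler forces them to receive the boolean. A small illustration with hypothetical types:

package main

import "fmt"

type blockNode struct{ name string }

type blockIndex struct {
	index map[string]*blockNode
}

// LookupNode in comma-ok form: the second result makes "not found"
// impossible to ignore at the call site.
func (bi *blockIndex) LookupNode(hash string) (*blockNode, bool) {
	node, ok := bi.index[hash]
	return node, ok
}

func main() {
	bi := &blockIndex{index: map[string]*blockNode{}}
	if node, ok := bi.LookupNode("deadbeef"); !ok {
		fmt.Println("block is unknown") // handled here, not as a later nil dereference
	} else {
		fmt.Println(node.name)
	}
}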
// AddNode adds the provided node to the block index and marks it as dirty.

View File

@ -29,8 +29,15 @@ func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (Bl
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
highNode := dag.index.LookupNode(highHash)
lowNode := dag.index.LookupNode(lowHash)
highNode, ok := dag.index.LookupNode(highHash)
if !ok {
return nil, errors.Errorf("block %s is unknown", highHash)
}
lowNode, ok := dag.index.LookupNode(lowHash)
if !ok {
return nil, errors.Errorf("block %s is unknown", lowHash)
}
return dag.blockLocator(highNode, lowNode)
}
@ -88,8 +95,8 @@ func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash,
lowNode := dag.genesis
nextBlockLocatorIndex := int64(len(locator) - 1)
for i, hash := range locator {
node := dag.index.LookupNode(hash)
if node != nil {
node, ok := dag.index.LookupNode(hash)
if ok {
lowNode = node
nextBlockLocatorIndex = int64(i) - 1
break

View File

@ -2,6 +2,7 @@ package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/bigintpool"
"github.com/pkg/errors"
"math"
"math/big"
@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
return
}
func (window blockWindow) averageTarget() *big.Int {
averageTarget := big.NewInt(0)
func (window blockWindow) averageTarget(averageTarget *big.Int) {
averageTarget.SetInt64(0)
target := bigintpool.Acquire(0)
defer bigintpool.Release(target)
for _, node := range window {
target := util.CompactToBig(node.bits)
util.CompactToBigWithDestination(node.bits, target)
averageTarget.Add(averageTarget, target)
}
return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
windowLen := bigintpool.Acquire(int64(len(window)))
defer bigintpool.Release(windowLen)
averageTarget.Div(averageTarget, windowLen)
}
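averageTarget now writes into a caller-supplied big.Int and borrows temporaries from util/bigintpool, avoiding a fresh allocation per window entry. The pool's internals are not part of this diff; a plausible minimal implementation over sync.Pool would look like this:

package bigintpool // hypothetical reconstruction of util/bigintpool

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

// Acquire returns a pooled big.Int initialized to x.
func Acquire(x int64) *big.Int {
	b := pool.Get().(*big.Int)
	b.SetInt64(x)
	return b
}

// Release zeroes b and returns it to the pool.
func Release(b *big.Int) {
	b.SetInt64(0)
	pool.Put(b)
}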
func (window blockWindow) medianTimestamp() (int64, error) {

View File

@ -53,12 +53,12 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"C", "D"},
id: "E",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"A"},
@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"I"},
id: "J",
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
},
{
parents: []string{"J"},
id: "K",
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
},
{
parents: []string{"K"},
id: "L",
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
},
{
parents: []string{"L"},
id: "M",
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
},
{
parents: []string{"M"},
id: "N",
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
},
{
parents: []string{"N"},
id: "O",
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
},
}
@ -133,7 +133,10 @@ func TestBlueBlockWindow(t *testing.T) {
t.Fatalf("block %v was unexpectedly orphan", blockData.id)
}
node := dag.index.LookupNode(utilBlock.Hash())
node, ok := dag.index.LookupNode(utilBlock.Hash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
}
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id

View File

@ -5,15 +5,14 @@ import (
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/coinbasepayload"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/txsort"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
)
// compactFeeData is a specialized data type to store a compact list of fees
@ -98,7 +97,10 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
return nil
}
blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
_, scriptPubKey, extraData, err := coinbasepayload.DeserializeCoinbasePayload(blockCoinbaseTx)
if errors.Is(err, coinbasepayload.ErrIncorrectScriptPubKeyLen) {
return ruleError(ErrBadCoinbaseTransaction, err.Error())
}
if err != nil {
return err
}
@ -125,16 +127,15 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD
txOuts := []*wire.TxOut{}
for _, blue := range node.blues {
txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
if err != nil {
return nil, err
}
txIns = append(txIns, txIn)
if txOut != nil {
txOuts = append(txOuts, txOut)
}
}
payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
payload, err := coinbasepayload.SerializeCoinbasePayload(node.blueScore, scriptPubKey, extraData)
if err != nil {
return nil, err
}
@ -143,83 +144,33 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD
return util.NewTx(sortedCoinbaseTx), nil
}
// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
w := &bytes.Buffer{}
err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
if err != nil {
return nil, err
}
_, err = w.Write(scriptPubKey)
if err != nil {
return nil, err
}
_, err = w.Write(extraData)
if err != nil {
return nil, err
}
return w.Bytes(), nil
}
// DeserializeCoinbasePayload deserialize the coinbase payload to its component (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
r := bytes.NewReader(tx.Payload)
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, nil, err
}
scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(scriptPubKey)
if err != nil {
return nil, nil, err
}
extraData = make([]byte, r.Len())
if r.Len() != 0 {
_, err = r.Read(extraData)
if err != nil {
return nil, nil, err
}
}
return scriptPubKey, extraData, nil
}
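These helpers move to util/coinbasepayload and gain a blue score field: the new call sites pass node.blueScore to SerializeCoinbasePayload and receive it back as the first return value of DeserializeCoinbasePayload. The package itself is outside this diff, so the following is only a hedged sketch; it prepends the blue score as a little-endian uint64, uses a fixed-width length prefix where the removed code used wire.WriteVarInt, and takes the raw payload bytes instead of a *wire.MsgTx:

package coinbasepayload

import (
	"bytes"
	"encoding/binary"
	"io"

	"github.com/pkg/errors"
)

// ErrIncorrectScriptPubKeyLen is the sentinel matched by errors.Is in
// validateCoinbaseTransaction above.
var ErrIncorrectScriptPubKeyLen = errors.New("incorrect script public key length")

// SerializeCoinbasePayload (sketch): blue score, then a length-prefixed
// scriptPubKey, then extraData running to the end of the payload.
func SerializeCoinbasePayload(blueScore uint64, scriptPubKey, extraData []byte) ([]byte, error) {
	w := &bytes.Buffer{}
	if err := binary.Write(w, binary.LittleEndian, blueScore); err != nil {
		return nil, err
	}
	if err := binary.Write(w, binary.LittleEndian, uint64(len(scriptPubKey))); err != nil {
		return nil, err
	}
	if _, err := w.Write(scriptPubKey); err != nil {
		return nil, err
	}
	if _, err := w.Write(extraData); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

// DeserializeCoinbasePayload reverses the sketch above.
func DeserializeCoinbasePayload(payload []byte) (blueScore uint64, scriptPubKey, extraData []byte, err error) {
	r := bytes.NewReader(payload)
	if err := binary.Read(r, binary.LittleEndian, &blueScore); err != nil {
		return 0, nil, nil, err
	}
	var scriptPubKeyLen uint64
	if err := binary.Read(r, binary.LittleEndian, &scriptPubKeyLen); err != nil {
		return 0, nil, nil, err
	}
	if scriptPubKeyLen > uint64(r.Len()) {
		return 0, nil, nil, ErrIncorrectScriptPubKeyLen
	}
	scriptPubKey = make([]byte, scriptPubKeyLen)
	if _, err := io.ReadFull(r, scriptPubKey); err != nil {
		return 0, nil, nil, err
	}
	extraData = make([]byte, r.Len())
	if _, err := io.ReadFull(r, extraData); err != nil {
		return 0, nil, nil, err
	}
	return blueScore, scriptPubKey, extraData, nil
}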
// feeInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {
// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns nil for txOut
func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*wire.TxOut, error) {
blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
if !ok {
return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
return nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
}
if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
return nil, nil, errors.Errorf(
return nil, errors.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}
txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
}
totalFees := uint64(0)
feeIterator := blockFeeData.iterator()
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
@ -229,13 +180,13 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
if totalReward == 0 {
return txIn, nil, nil
return nil, nil
}
// the ScriptPubKey for the coinbase is parsed from the coinbase payload
scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
_, scriptPubKey, _, err := coinbasepayload.DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
if err != nil {
return nil, nil, err
return nil, err
}
txOut := &wire.TxOut{
@ -243,5 +194,5 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
ScriptPubKey: scriptPubKey,
}
return txIn, txOut, nil
return txOut, nil
}

View File

@ -7,15 +7,15 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@ -144,61 +144,42 @@ func addNodeAsChildToParents(node *blockNode) {
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
// Ensure the error code is of the expected type and the error
// code matches the value specified in the test instance.
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
gotErr, wantErr)
}
if gotErr == nil {
if wantErr == nil && gotErr == nil {
return nil
}
// Ensure the want error type is a script error.
werr, ok := wantErr.(RuleError)
if !ok {
return errors.Errorf("unexpected test error type %T", wantErr)
var gotRuleErr RuleError
if ok := errors.As(gotErr, &gotRuleErr); !ok {
return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
}
var wantRuleErr RuleError
if ok := errors.As(wantErr, &wantRuleErr); !ok {
return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
// here since the code above already proved they are the same type and
// the want error is a script error.
gotErrorCode := gotErr.(RuleError).ErrorCode
if gotErrorCode != werr.ErrorCode {
if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
return errors.Errorf("mismatched error code - got %v (%v), want %v",
gotErrorCode, gotErr, werr.ErrorCode)
gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
}
return nil
}
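The rewrite above replaces raw type assertions with errors.As, which keeps matching a RuleError even after it has been wrapped, for example by github.com/pkg/errors. A compact illustration with a hypothetical wrapped error:

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

type RuleError struct{ ErrorCode int }

func (e RuleError) Error() string { return fmt.Sprintf("rule error %d", e.ErrorCode) }

func main() {
	err := errors.Wrap(RuleError{ErrorCode: 7}, "while validating block")

	// A direct type assertion misses the wrapped value...
	if _, ok := err.(RuleError); !ok {
		fmt.Println("type assertion misses the wrapped RuleError")
	}

	// ...but errors.As unwraps until it finds one.
	var ruleErr RuleError
	if stderrors.As(err, &ruleErr) {
		fmt.Println("found ErrorCode:", ruleErr.ErrorCode)
	}
}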
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
parentHashes := make([]*daghash.Hash, len(parents))
for i, parent := range parents {
parentHashes[i] = parent.BlockHash()
}
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
}
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
node := dag.index.LookupNode(block.BlockHash())
if node == nil {
node, ok := dag.index.LookupNode(block.BlockHash())
if !ok {
t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
}
return node

View File

@ -6,12 +6,13 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"math"
"sort"
"sync"
"time"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/subnetworkid"
@ -150,9 +151,13 @@ type BlockDAG struct {
lastFinalityPoint *blockNode
utxoDiffStore *utxoDiffStore
reachabilityStore *reachabilityStore
multisetStore *multisetStore
utxoDiffStore *utxoDiffStore
multisetStore *multisetStore
reachabilityTree *reachabilityTree
recentBlockProcessingTimestamps []time.Time
startTime time.Time
}
// IsKnownBlock returns whether or not the DAG instance has the block represented
@ -161,7 +166,7 @@ type BlockDAG struct {
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsKnownBlock(hash *daghash.Hash) bool {
return dag.IsInDAG(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash)
return dag.IsInDAG(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash) || dag.IsKnownInvalid(hash)
}
// AreKnownBlocks returns whether or not the DAG instances has all blocks represented
@ -205,8 +210,8 @@ func (dag *BlockDAG) IsKnownOrphan(hash *daghash.Hash) bool {
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsKnownInvalid(hash *daghash.Hash) bool {
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
return false
}
return dag.index.NodeStatus(node).KnownInvalid()
@ -547,8 +552,8 @@ func (node *blockNode) validateAcceptedIDMerkleRoot(dag *BlockDAG, txsAcceptance
func (dag *BlockDAG) connectBlock(node *blockNode,
block *util.Block, selectedParentAnticone []*blockNode, fastAdd bool) (*chainUpdates, error) {
// No warnings about unknown rules or versions until the DAG is
// current.
if dag.isCurrent() {
// synced.
if dag.isSynced() {
// Warn if any unknown new rules are either about to activate or
// have already been activated.
if err := dag.warnUnknownRuleActivations(node); err != nil {
@ -562,7 +567,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
}
}
if err := dag.checkFinalityRules(node); err != nil {
if err := dag.checkFinalityViolation(node); err != nil {
return nil, err
}
@ -570,12 +575,9 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return nil, err
}
newBlockUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
newBlockPastUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err :=
node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
if err != nil {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok {
return nil, ruleError(ruleErr.ErrorCode, fmt.Sprintf("error verifying UTXO for %s: %s", node, err))
}
return nil, errors.Wrapf(err, "error verifying UTXO for %s", node)
}
@ -585,7 +587,8 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
}
// Apply all changes to the DAG.
virtualUTXODiff, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone)
virtualUTXODiff, chainUpdates, err :=
dag.applyDAGChanges(node, newBlockPastUTXO, newBlockMultiSet, selectedParentAnticone)
if err != nil {
// Since all validation logic has already ran, if applyDAGChanges errors out,
// this means we have a problem in the internal structure of the DAG - a problem which is
@ -602,29 +605,43 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return chainUpdates, nil
}
// calcMultiset returns the multiset of the UTXO of the given block with the given transactions.
func (node *blockNode) calcMultiset(dag *BlockDAG, transactions []*util.Tx, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO, pastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.pastUTXOMultiSet(dag, acceptanceData, selectedParentUTXO)
// calcMultiset returns the multiset of the past UTXO of the given block.
func (node *blockNode) calcMultiset(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
return node.pastUTXOMultiSet(dag, acceptanceData, selectedParentPastUTXO)
}
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.selectedParentMultiset(dag)
if err != nil {
return nil, err
}
for _, tx := range transactions {
ms, err = addTxToMultiset(ms, tx.MsgTx(), pastUTXO, UnacceptedBlueScore)
if err != nil {
return nil, err
for _, blockAcceptanceData := range acceptanceData {
for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
tx := txAcceptanceData.Tx.MsgTx()
var err error
ms, err = addTxToMultiset(ms, tx, selectedParentPastUTXO, node.blueScore)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
// acceptedSelectedParentMultiset takes the multiset of the selected
// parent, replaces all the selected parent outputs' blue score with
// the block blue score and returns the result.
func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG,
acceptanceData MultiBlockTxsAcceptanceData) (*secp256k1.MultiSet, error) {
// selectedParentMultiset returns the multiset of the node's selected
// parent. If the node is the genesis blockNode then it does not have
// a selected parent, in which case return a new, empty multiset.
func (node *blockNode) selectedParentMultiset(dag *BlockDAG) (*secp256k1.MultiSet, error) {
if node.isGenesis() {
return secp256k1.NewMultiset(), nil
}
@ -634,81 +651,24 @@ func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG,
return nil, err
}
selectedParentAcceptanceData, exists := acceptanceData.FindAcceptanceData(node.selectedParent.hash)
if !exists {
return nil, errors.Errorf("couldn't find selected parent acceptance data for block %s", node)
}
for _, txAcceptanceData := range selectedParentAcceptanceData.TxAcceptanceData {
tx := txAcceptanceData.Tx
msgTx := tx.MsgTx()
isCoinbase := tx.IsCoinBase()
for i, txOut := range msgTx.TxOut {
outpoint := *wire.NewOutpoint(tx.ID(), uint32(i))
unacceptedEntry := NewUTXOEntry(txOut, isCoinbase, UnacceptedBlueScore)
acceptedEntry := NewUTXOEntry(txOut, isCoinbase, node.blueScore)
var err error
ms, err = removeUTXOFromMultiset(ms, unacceptedEntry, &outpoint)
if err != nil {
return nil, err
}
ms, err = addUTXOToMultiset(ms, acceptedEntry, &outpoint)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.acceptedSelectedParentMultiset(dag, acceptanceData)
if err != nil {
return nil, err
}
for _, blockAcceptanceData := range acceptanceData {
if blockAcceptanceData.BlockHash.IsEqual(node.selectedParent.hash) {
continue
}
for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
tx := txAcceptanceData.Tx.MsgTx()
var err error
ms, err = addTxToMultiset(ms, tx, selectedParentUTXO, node.blueScore)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, blockBlueScore uint64) (*secp256k1.MultiSet, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
entry, ok := pastUTXO.Get(txIn.PreviousOutpoint)
if !ok {
return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
for _, txIn := range tx.TxIn {
entry, ok := pastUTXO.Get(txIn.PreviousOutpoint)
if !ok {
return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
var err error
ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint)
if err != nil {
return nil, err
}
var err error
ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint)
if err != nil {
return nil, err
}
}
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)
@ -741,7 +701,7 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
return err
}
err = dag.reachabilityStore.flushToDB(dbTx)
err = dag.reachabilityTree.storeState(dbTx)
if err != nil {
return err
}
@ -800,7 +760,8 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
dag.index.clearDirtyEntries()
dag.utxoDiffStore.clearDirtyEntries()
dag.reachabilityStore.clearDirtyEntries()
dag.utxoDiffStore.clearOldEntries()
dag.reachabilityTree.store.clearDirtyEntries()
dag.multisetStore.clearNewEntries()
return nil
@ -856,20 +817,40 @@ func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash {
return dag.lastFinalityPoint.hash
}
// checkFinalityRules checks the new block does not violate the finality rules
// specifically - the new block selectedParent chain should contain the old finality point
func (dag *BlockDAG) checkFinalityRules(newNode *blockNode) error {
// isInSelectedParentChainOf returns whether `node` is in the selected parent chain of `other`.
func (dag *BlockDAG) isInSelectedParentChainOf(node *blockNode, other *blockNode) (bool, error) {
// By definition, a node is not in the selected parent chain of itself.
if node == other {
return false, nil
}
return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other)
}
// checkFinalityViolation checks that the new block does not violate the finality rules;
// specifically, the new block's selectedParent chain should contain the old finality point.
func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error {
// the genesis block can not violate finality rules
if newNode.isGenesis() {
return nil
}
for currentNode := newNode; currentNode != dag.lastFinalityPoint; currentNode = currentNode.selectedParent {
// If we went past dag's last finality point without encountering it -
// the new block has violated finality.
if currentNode.blueScore <= dag.lastFinalityPoint.blueScore {
return ruleError(ErrFinality, "The last finality point is not in the selected chain of this block")
}
// Because newNode doesn't have reachability data we
// need to check if the last finality point is in the
// selected parent chain of newNode.selectedParent, so
// we explicitly check if newNode.selectedParent is
// the finality point.
if dag.lastFinalityPoint == newNode.selectedParent {
return nil
}
isInSelectedChain, err := dag.isInSelectedParentChainOf(dag.lastFinalityPoint, newNode.selectedParent)
if err != nil {
return err
}
if !isInSelectedChain {
return ruleError(ErrFinality, "the last finality point is not in the selected parent chain of this block")
}
return nil
}
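isInSelectedParentChainOf delegates to the reachability tree, whose implementation is outside this diff. A standard technique for constant-time tree-ancestry queries, and the shape this API suggests, is to assign every tree node an interval that contains the intervals of all its descendants, so ancestry reduces to interval containment. A minimal sketch assuming such intervals are already assigned:

package main

import "fmt"

type interval struct{ start, end uint64 }

type treeNode struct {
	name     string
	interval interval
}

// isTreeAncestorOf reports whether node is an ancestor of other in the
// tree: true exactly when node's interval contains other's interval.
func (node *treeNode) isTreeAncestorOf(other *treeNode) bool {
	return node.interval.start <= other.interval.start &&
		other.interval.end <= node.interval.end
}

func main() {
	genesis := &treeNode{"genesis", interval{0, 100}}
	child := &treeNode{"child", interval{10, 40}}
	fmt.Println(genesis.isTreeAncestorOf(child)) // true: [10,40] lies inside [0,100]
	fmt.Println(child.isTreeAncestorOf(genesis)) // false
}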
@ -906,9 +887,9 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
for parent := range dag.lastFinalityPoint.parents {
queue = append(queue, parent)
}
var blockHashesToDelete []*daghash.Hash
var nodesToDelete []*blockNode
if deleteDiffData {
blockHashesToDelete = make([]*daghash.Hash, 0, dag.dagParams.FinalityInterval)
nodesToDelete = make([]*blockNode, 0, dag.dagParams.FinalityInterval)
}
for len(queue) > 0 {
var current *blockNode
@ -916,7 +897,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
if !current.isFinalized {
current.isFinalized = true
if deleteDiffData {
blockHashesToDelete = append(blockHashesToDelete, current.hash)
nodesToDelete = append(nodesToDelete, current)
}
for parent := range current.parents {
queue = append(queue, parent)
@ -924,7 +905,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
}
}
if deleteDiffData {
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), blockHashesToDelete)
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), nodesToDelete)
if err != nil {
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
}
@ -934,10 +915,10 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
// IsKnownFinalizedBlock returns whether the block is below the finality point.
// IsKnownFinalizedBlock might be false-negative because node finality status is
// updated in a separate goroutine. To get a definite answer if a block
// is finalized or not, use dag.checkFinalityRules.
// is finalized or not, use dag.checkFinalityViolation.
func (dag *BlockDAG) IsKnownFinalizedBlock(blockHash *daghash.Hash) bool {
node := dag.index.LookupNode(blockHash)
return node != nil && node.isFinalized
node, ok := dag.index.LookupNode(blockHash)
return ok && node.isFinalized
}
// NextBlockCoinbaseTransaction prepares the coinbase transaction for the next mined block
@ -985,8 +966,8 @@ func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error)
//
// This function MUST be called with the DAG read-lock held
func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlockTxsAcceptanceData, error) {
node := dag.index.LookupNode(blockHash)
if node == nil {
node, ok := dag.index.LookupNode(blockHash)
if !ok {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
}
_, _, txsAcceptanceData, err := dag.pastUTXO(node)
@ -1006,18 +987,19 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
// It returns the diff in the virtual block's UTXO set.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockPastUTXO UTXOSet,
newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) {
// Add the block to the reachability structures
err = dag.updateReachability(node, selectedParentAnticone)
// Add the block to the reachability tree
err = dag.reachabilityTree.addBlock(node, selectedParentAnticone)
if err != nil {
return nil, nil, errors.Wrap(err, "failed updating reachability")
return nil, nil, errors.Wrap(err, "failed adding block to the reachability tree")
}
dag.multisetStore.setMultiset(node, newBlockMultiset)
if err = node.updateParents(dag, newBlockUTXO); err != nil {
if err = node.updateParents(dag, newBlockPastUTXO); err != nil {
return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
}
@ -1058,21 +1040,23 @@ func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
return newVirtualUTXODiffSet.meldToBase()
}
func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*UTXODiff, error) {
diff := NewUTXODiff()
for _, tx := range transactions {
txDiff, err := pastUTXO.diffFromTx(tx.MsgTx(), UnacceptedBlueScore)
if err != nil {
return nil, err
// checkDoubleSpendsWithBlockPast checks that each block transaction
// has a corresponding UTXO in the block pastUTXO.
func checkDoubleSpendsWithBlockPast(pastUTXO UTXOSet, blockTransactions []*util.Tx) error {
for _, tx := range blockTransactions {
if tx.IsCoinBase() {
continue
}
diff, err = diff.WithDiff(txDiff)
if err != nil {
return nil, err
for _, txIn := range tx.MsgTx().TxIn {
if _, ok := pastUTXO.Get(txIn.PreviousOutpoint); !ok {
return ruleError(ErrMissingTxOut, fmt.Sprintf("missing transaction "+
"output %s in the utxo set", txIn.PreviousOutpoint))
}
}
}
return diff, nil
return nil
}
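// Illustrative call sketch (an assumption, not part of the original change;
// presumes a validation context where the block's past UTXO set and its
// transactions are in scope):
//
//	if err := checkDoubleSpendsWithBlockPast(pastUTXO, block.Transactions()); err != nil {
//		// A RuleError with ErrMissingTxOut: some input has no
//		// corresponding UTXO in the block's past.
//		return err
//	}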
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
@ -1081,7 +1065,7 @@ func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*
func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) (
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) {
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
pastUTXO, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
if err != nil {
return nil, nil, nil, nil, err
}
@ -1096,16 +1080,7 @@ func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx
return nil, nil, nil, nil, err
}
diffFromTxs, err := node.diffFromTxs(pastUTXO, transactions)
if err != nil {
return nil, nil, nil, nil, err
}
utxo, err := pastUTXO.WithDiff(diffFromTxs)
if err != nil {
return nil, nil, nil, nil, err
}
multiset, err = node.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
multiset, err = node.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
if err != nil {
return nil, nil, nil, nil, err
}
@ -1118,7 +1093,7 @@ func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx
return nil, nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
}
return utxo, txsAcceptanceData, feeData, multiset, nil
return pastUTXO, txsAcceptanceData, feeData, multiset, nil
}
// TxAcceptanceData stores a transaction together with an indication
@ -1174,34 +1149,33 @@ func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) {
return blueBlocks, nil
}
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's UTXO set
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's past UTXO set
// Failures are purposefully ignored - they simply mark a transaction as unaccepted.
// Whether or not each transaction was accepted is recorded in txsAcceptanceData.
func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selectedParentAcceptanceData []TxAcceptanceData, blueBlocks []*util.Block) (
func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlocks []*util.Block) (
pastUTXO UTXOSet, multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO = acceptedSelectedParentUTXO
multiBlockTxsAcceptanceData = MultiBlockTxsAcceptanceData{BlockTxsAcceptanceData{
BlockHash: *node.selectedParent.hash,
TxAcceptanceData: selectedParentAcceptanceData,
}}
pastUTXO = selectedParentPastUTXO.(*DiffUTXOSet).cloneWithoutBase()
multiBlockTxsAcceptanceData = make(MultiBlockTxsAcceptanceData, len(blueBlocks))
// Add blueBlocks to multiBlockTxsAcceptanceData in topological order. This
// is so that anyone who iterates over it would process blocks (and transactions)
// in their order of appearance in the DAG.
// We skip the selected parent, because we calculated its UTXO in acceptSelectedParentTransactions.
for i := 1; i < len(blueBlocks); i++ {
for i := 0; i < len(blueBlocks); i++ {
blueBlock := blueBlocks[i]
transactions := blueBlock.Transactions()
blockTxsAcceptanceData := BlockTxsAcceptanceData{
BlockHash: *blueBlock.Hash(),
TxAcceptanceData: make([]TxAcceptanceData, len(transactions)),
}
for i, tx := range blueBlock.Transactions() {
isSelectedParent := i == 0
for j, tx := range blueBlock.Transactions() {
var isAccepted bool
// Coinbase transaction outputs are added to the UTXO
// only if they are in the selected parent chain.
if tx.IsCoinBase() {
if !isSelectedParent && tx.IsCoinBase() {
isAccepted = false
} else {
isAccepted, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore)
@ -1209,9 +1183,9 @@ func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selec
return nil, nil, err
}
}
blockTxsAcceptanceData.TxAcceptanceData[i] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
blockTxsAcceptanceData.TxAcceptanceData[j] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
}
multiBlockTxsAcceptanceData = append(multiBlockTxsAcceptanceData, blockTxsAcceptanceData)
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
}
return pastUTXO, multiBlockTxsAcceptanceData, nil
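// Note on the loop above: blueBlocks[0] is the selected parent, so only its
// coinbase outputs reach pastUTXO via AddTx; coinbase transactions of all
// other blue blocks are marked not accepted, since coinbase outputs enter the
// UTXO set only through the selected parent chain.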
@ -1242,7 +1216,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
return err
}
if diffChild == nil {
parentUTXO, err := dag.restoreUTXO(parent)
parentPastUTXO, err := dag.restorePastUTXO(parent)
if err != nil {
return err
}
@ -1250,7 +1224,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
if err != nil {
return err
}
diff, err := newBlockUTXO.diffFrom(parentUTXO)
diff, err := newBlockUTXO.diffFrom(parentPastUTXO)
if err != nil {
return err
}
@ -1268,12 +1242,13 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
// To save traversals over the blue blocks, it also returns the transaction acceptance data for
// all blue blocks
func (dag *BlockDAG) pastUTXO(node *blockNode) (
pastUTXO, selectedParentUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO, selectedParentPastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
if node.isGenesis() {
return genesisPastUTXO(dag.virtual), NewFullUTXOSet(), MultiBlockTxsAcceptanceData{}, nil
return genesisPastUTXO(dag.virtual), nil, MultiBlockTxsAcceptanceData{}, nil
}
selectedParentUTXO, err = dag.restoreUTXO(node.selectedParent)
selectedParentPastUTXO, err = dag.restorePastUTXO(node.selectedParent)
if err != nil {
return nil, nil, nil, err
}
@ -1283,46 +1258,16 @@ func (dag *BlockDAG) pastUTXO(node *blockNode) (
return nil, nil, nil, err
}
selectedParent := blueBlocks[0]
acceptedSelectedParentUTXO, selectedParentAcceptanceData, err := node.acceptSelectedParentTransactions(selectedParent, selectedParentUTXO)
pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(selectedParentPastUTXO, blueBlocks)
if err != nil {
return nil, nil, nil, err
}
pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
if err != nil {
return nil, nil, nil, err
}
return pastUTXO, selectedParentUTXO, bluesTxsAcceptanceData, nil
return pastUTXO, selectedParentPastUTXO, bluesTxsAcceptanceData, nil
}
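// Hedged usage sketch (not part of the original change): callers must handle
// the genesis special case, in which selectedParentPastUTXO is nil:
//
//	pastUTXO, selectedParentPastUTXO, acceptanceData, err := dag.pastUTXO(node)
//	if err != nil {
//		return err
//	}
//	if selectedParentPastUTXO == nil {
//		// node is the genesis block - there is no selected parent.
//	}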
func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Block, selectedParentUTXO UTXOSet) (acceptedSelectedParentUTXO UTXOSet, txAcceptanceData []TxAcceptanceData, err error) {
diff := NewUTXODiff()
txAcceptanceData = make([]TxAcceptanceData, len(selectedParent.Transactions()))
for i, tx := range selectedParent.Transactions() {
txAcceptanceData[i] = TxAcceptanceData{
Tx: tx,
IsAccepted: true,
}
acceptanceDiff, err := selectedParentUTXO.diffFromAcceptedTx(tx.MsgTx(), node.blueScore)
if err != nil {
return nil, nil, err
}
diff, err = diff.WithDiff(acceptanceDiff)
if err != nil {
return nil, nil, err
}
}
acceptedSelectedParentUTXO, err = selectedParentUTXO.WithDiff(diff)
if err != nil {
return nil, nil, err
}
return acceptedSelectedParentUTXO, txAcceptanceData, nil
}
// restoreUTXO restores the UTXO of a given block from its diff
func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
// restorePastUTXO restores the UTXO of a given block from its diff
func (dag *BlockDAG) restorePastUTXO(node *blockNode) (UTXOSet, error) {
stack := []*blockNode{}
// Iterate over the chain of diff-children from node until virtual and add them
@ -1363,11 +1308,11 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
// updateTipsUTXO builds and applies new diff UTXOs for all the DAG's tips
func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
for tip := range dag.virtual.parents {
tipUTXO, err := dag.restoreUTXO(tip)
tipPastUTXO, err := dag.restorePastUTXO(tip)
if err != nil {
return err
}
diff, err := virtualUTXO.diffFrom(tipUTXO)
diff, err := virtualUTXO.diffFrom(tipPastUTXO)
if err != nil {
return err
}
@ -1380,18 +1325,18 @@ func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
return nil
}
// isCurrent returns whether or not the DAG believes it is current. Several
// isSynced returns whether or not the DAG believes it is synced. Several
// factors are used to guess, but the key factors that allow the DAG to
// believe it is current are:
// believe it is synced are:
// - Latest block has a timestamp newer than 24 hours ago
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) isCurrent() bool {
// Not current if the virtual's selected parent has a timestamp
func (dag *BlockDAG) isSynced() bool {
// Not synced if the virtual's selected parent has a timestamp
// before 24 hours ago. If the DAG is empty, we take the genesis
// block timestamp.
//
// The DAG appears to be current if none of the checks reported
// The DAG appears to be synced if none of the checks reported
// otherwise.
var dagTimestamp int64
selectedTip := dag.selectedTip()
@ -1411,17 +1356,17 @@ func (dag *BlockDAG) Now() time.Time {
return dag.timeSource.Now()
}
// IsCurrent returns whether or not the DAG believes it is current. Several
// IsSynced returns whether or not the DAG believes it is synced. Several
// factors are used to guess, but the key factors that allow the DAG to
// believe it is current are:
// believe it is synced are:
// - Latest block has a timestamp newer than 24 hours ago
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCurrent() bool {
func (dag *BlockDAG) IsSynced() bool {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
return dag.isCurrent()
return dag.isSynced()
}
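// Hedged usage sketch (illustrative only): callers typically gate
// time-sensitive behavior on sync status:
//
//	if !dag.IsSynced() {
//		// The selected tip is older than 24 hours; avoid decisions that
//		// assume an up-to-date DAG, e.g. building block templates.
//	}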
// selectedTip returns the current selected tip for the DAG.
@ -1477,14 +1422,29 @@ func (dag *BlockDAG) GetUTXOEntry(outpoint wire.Outpoint) (*UTXOEntry, bool) {
// BlueScoreByBlockHash returns the blue score of a block with the given hash.
func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) {
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
return 0, errors.Errorf("block %s is unknown", hash)
}
return node.blueScore, nil
}
// BluesByBlockHash returns the blues of the block for the given hash.
func (dag *BlockDAG) BluesByBlockHash(hash *daghash.Hash) ([]*daghash.Hash, error) {
node, ok := dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("block %s is unknown", hash)
}
hashes := make([]*daghash.Hash, len(node.blues))
for i, blue := range node.blues {
hashes[i] = blue.hash
}
return hashes, nil
}
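// Hedged usage sketch (printBlues is a hypothetical helper, not part of the
// original change):
//
//	func printBlues(dag *BlockDAG, hash *daghash.Hash) error {
//		blues, err := dag.BluesByBlockHash(hash)
//		if err != nil {
//			return err // the hash is unknown to the DAG
//		}
//		for _, blueHash := range blues {
//			fmt.Println(blueHash)
//		}
//		return nil
//	}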
// BlockConfirmationsByHash returns the confirmations number for a block with the
// given hash. See blockConfirmations for further details.
//
@ -1504,8 +1464,8 @@ func (dag *BlockDAG) BlockConfirmationsByHashNoLock(hash *daghash.Hash) (uint64,
return 0, nil
}
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
return 0, errors.Errorf("block %s is unknown", hash)
}
@ -1620,8 +1580,8 @@ func (dag *BlockDAG) oldestChainBlockWithBlueScoreGreaterThan(blueScore uint64)
//
// This method MUST be called with the DAG lock held
func (dag *BlockDAG) IsInSelectedParentChain(blockHash *daghash.Hash) (bool, error) {
blockNode := dag.index.LookupNode(blockHash)
if blockNode == nil {
blockNode, ok := dag.index.LookupNode(blockHash)
if !ok {
str := fmt.Sprintf("block %s is not in the DAG", blockHash)
return false, errNotInDAG(str)
}
@ -1653,7 +1613,10 @@ func (dag *BlockDAG) SelectedParentChain(blockHash *daghash.Hash) ([]*daghash.Ha
for !isBlockInSelectedParentChain {
removedChainHashes = append(removedChainHashes, blockHash)
node := dag.index.LookupNode(blockHash)
node, ok := dag.index.LookupNode(blockHash)
if !ok {
return nil, nil, errors.Errorf("block %s does not exist in the DAG", blockHash)
}
blockHash = node.selectedParent.hash
isBlockInSelectedParentChain, err = dag.IsInSelectedParentChain(blockHash)
@ -1716,8 +1679,8 @@ func (dag *BlockDAG) CurrentBits() uint32 {
// HeaderByHash returns the block header identified by the given hash or an
// error if it doesn't exist.
func (dag *BlockDAG) HeaderByHash(hash *daghash.Hash) (*wire.BlockHeader, error) {
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
err := errors.Errorf("block %s is not known", hash)
return &wire.BlockHeader{}, err
}
@ -1730,8 +1693,8 @@ func (dag *BlockDAG) HeaderByHash(hash *daghash.Hash) (*wire.BlockHeader, error)
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, error) {
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str)
@ -1745,8 +1708,8 @@ func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, err
//
// This function is safe for concurrent access.
func (dag *BlockDAG) SelectedParentHash(blockHash *daghash.Hash) (*daghash.Hash, error) {
node := dag.index.LookupNode(blockHash)
if node == nil {
node, ok := dag.index.LookupNode(blockHash)
if !ok {
str := fmt.Sprintf("block %s is not in the DAG", blockHash)
return nil, errNotInDAG(str)
@ -1780,12 +1743,12 @@ func (dag *BlockDAG) antiPastHashesBetween(lowHash, highHash *daghash.Hash, maxH
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) {
lowNode := dag.index.LookupNode(lowHash)
if lowNode == nil {
lowNode, ok := dag.index.LookupNode(lowHash)
if !ok {
return nil, errors.Errorf("Couldn't find low hash %s", lowHash)
}
highNode := dag.index.LookupNode(highHash)
if highNode == nil {
highNode, ok := dag.index.LookupNode(highHash)
if !ok {
return nil, errors.Errorf("Couldn't find high hash %s", highHash)
}
if lowNode.blueScore >= highNode.blueScore {
@ -1818,7 +1781,7 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries
continue
}
visited.add(current)
isCurrentAncestorOfLowNode, err := dag.isAncestorOf(current, lowNode)
isCurrentAncestorOfLowNode, err := dag.isInPast(current, lowNode)
if err != nil {
return nil, err
}
@ -1844,6 +1807,10 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries
return nodes, nil
}
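// isInPast returns whether `this` is in the past of `other` in the DAG,
// delegating the query to the reachability tree.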
func (dag *BlockDAG) isInPast(this *blockNode, other *blockNode) (bool, error) {
return dag.reachabilityTree.isInPast(this, other)
}
// AntiPastHashesBetween returns the hashes of the blocks between the
// lowHash's antiPast and highHash's antiPast, or up to the provided
// max number of block hashes.
@ -1880,8 +1847,9 @@ func (dag *BlockDAG) antiPastHeadersBetween(lowHash, highHash *daghash.Hash, max
func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
highNode := &dag.virtual.blockNode
if highHash != nil {
highNode = dag.index.LookupNode(highHash)
if highNode == nil {
var ok bool
highNode, ok = dag.index.LookupNode(highHash)
if !ok {
return nil, errors.Errorf("Couldn't find the high hash %s in the dag", highHash)
}
}
@ -2063,10 +2031,10 @@ type Config struct {
func New(config *Config) (*BlockDAG, error) {
// Enforce required config fields.
if config.DAGParams == nil {
return nil, AssertError("BlockDAG.New DAG parameters nil")
return nil, errors.New("BlockDAG.New DAG parameters nil")
}
if config.TimeSource == nil {
return nil, AssertError("BlockDAG.New timesource is nil")
return nil, errors.New("BlockDAG.New timesource is nil")
}
params := config.DAGParams
@ -2091,12 +2059,13 @@ func New(config *Config) (*BlockDAG, error) {
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
blockCount: 0,
subnetworkID: config.SubnetworkID,
startTime: time.Now(),
}
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.reachabilityStore = newReachabilityStore(dag)
dag.multisetStore = newMultisetStore(dag)
dag.reachabilityTree = newReachabilityTree(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state
@ -2115,9 +2084,9 @@ func New(config *Config) (*BlockDAG, error) {
}
}
genesis := index.LookupNode(params.GenesisHash)
genesis, ok := index.LookupNode(params.GenesisHash)
if genesis == nil {
if !ok {
genesisBlock := util.NewBlock(dag.dagParams.GenesisBlock)
// Declare isOrphan and isDelayed explicitly to prevent the unintentional
// creation of a new err variable, so that the deferred function above can read err.
@ -2127,12 +2096,15 @@ func New(config *Config) (*BlockDAG, error) {
return nil, err
}
if isDelayed {
return nil, errors.New("Genesis block shouldn't be in the future")
return nil, errors.New("genesis block shouldn't be in the future")
}
if isOrphan {
return nil, errors.New("Genesis block is unexpectedly orphan")
return nil, errors.New("genesis block is unexpectedly orphan")
}
genesis, ok = index.LookupNode(params.GenesisHash)
if !ok {
return nil, errors.New("genesis is not found in the DAG after it was proccessed")
}
genesis = index.LookupNode(params.GenesisHash)
}
// Save a reference to the genesis block.

View File

@ -6,10 +6,13 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"math"
"os"
"path/filepath"
"reflect"
"testing"
"time"
@ -204,7 +207,7 @@ func TestIsKnownBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2).
{hash: "216301e3fc03cf89973b9192b4ecdd732bf3b677cf1ca4f6c340a56f1533fb4f", want: true},
{hash: "2eb8903d3eb7f977ab329649f56f4125afa532662f7afe5dba0d4a3f1b93746f", want: true},
// Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
@ -618,7 +621,10 @@ func TestAcceptingInInit(t *testing.T) {
testBlock := blocks[1]
// Create a test blockNode with an unvalidated status
genesisNode := dag.index.LookupNode(genesisBlock.Hash())
genesisNode, ok := dag.index.LookupNode(genesisBlock.Hash())
if !ok {
t.Fatalf("genesis block does not exist in the DAG")
}
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, blockSetFromSlice(genesisNode))
testNode.status = statusDataStored
@ -656,7 +662,11 @@ func TestAcceptingInInit(t *testing.T) {
}
// Make sure that the test node's status is valid
testNode = dag.index.LookupNode(testBlock.Hash())
testNode, ok = dag.index.LookupNode(testBlock.Hash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", testBlock.Hash())
}
if testNode.status&statusValid == 0 {
t.Fatalf("testNode is unexpectedly invalid")
}
@ -688,7 +698,7 @@ func TestConfirmations(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, 5)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i < 5; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each one of the chain blocks has the expected confirmations number
@ -707,8 +717,8 @@ func TestConfirmations(t *testing.T) {
branchingBlocks := make([]*wire.MsgBlock, 2)
// Add two branching blocks
branchingBlocks[0] = prepareAndProcessBlock(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlock(t, dag, branchingBlocks[0])
branchingBlocks[0] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingBlocks[0])
// Check that the genesis has a confirmations number == len(chainBlocks)
genesisConfirmations, err = dag.blockConfirmations(dag.genesis)
@ -738,7 +748,7 @@ func TestConfirmations(t *testing.T) {
// Generate 100 blocks to force the "main" chain to become red
branchingChainTip := branchingBlocks[1]
for i := uint32(0); i < 100; i++ {
nextBranchingChainTip := prepareAndProcessBlock(t, dag, branchingChainTip)
nextBranchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingChainTip)
branchingChainTip = nextBranchingChainTip
}
@ -797,7 +807,7 @@ func TestAcceptingBlock(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i <= numChainBlocks-1; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each chain block (including the genesis) is accepted by its child
@ -825,7 +835,7 @@ func TestAcceptingBlock(t *testing.T) {
// Generate a chain tip that will be in the anticone of the selected tip and
// in dag.virtual.blues.
branchingChainTip := prepareAndProcessBlock(t, dag, chainBlocks[len(chainBlocks)-3])
branchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[len(chainBlocks)-3])
// Make sure that branchingChainTip is not in the selected parent chain
isBranchingChainTipInSelectedParentChain, err := dag.IsInSelectedParentChain(branchingChainTip.BlockHash())
@ -863,7 +873,7 @@ func TestAcceptingBlock(t *testing.T) {
intersectionBlock := chainBlocks[1]
sideChainTip := intersectionBlock
for i := 0; i < len(chainBlocks)-3; i++ {
sideChainTip = prepareAndProcessBlock(t, dag, sideChainTip)
sideChainTip = prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip)
}
// Make sure that the accepting block of the parent of the branching block didn't change
@ -879,7 +889,7 @@ func TestAcceptingBlock(t *testing.T) {
// Make sure that a block that is found in the red set of the selected tip
// doesn't have an accepting block
prepareAndProcessBlock(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
sideChainTipAcceptingBlock, err := acceptingBlockByMsgBlock(sideChainTip)
if err != nil {
@ -953,6 +963,11 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
// Manually set the last finality point
dag.lastFinalityPoint = nodes[finalityInterval-1]
// Don't unload diffData
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
flushUTXODiffStore()
@ -960,7 +975,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if !node.isFinalized {
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; deleteDiffData && ok {
if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok {
t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
@ -988,7 +1003,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if node.isFinalized {
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if _, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
@ -1037,8 +1052,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
"is an orphan\n")
}
invalidBlockNode := dag.index.LookupNode(invalidBlock.Hash())
if invalidBlockNode == nil {
invalidBlockNode, ok := dag.index.LookupNode(invalidBlock.Hash())
if !ok {
t.Fatalf("invalidBlockNode wasn't added to the block index as expected")
}
if invalidBlockNode.status&statusValidateFailed != statusValidateFailed {
@ -1066,8 +1081,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
t.Fatalf("ProcessBlock incorrectly returned invalidBlockChild " +
"is an orphan\n")
}
invalidBlockChildNode := dag.index.LookupNode(invalidBlockChild.Hash())
if invalidBlockChildNode == nil {
invalidBlockChildNode, ok := dag.index.LookupNode(invalidBlockChild.Hash())
if !ok {
t.Fatalf("invalidBlockChild wasn't added to the block index as expected")
}
if invalidBlockChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
@ -1094,8 +1109,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
t.Fatalf("ProcessBlock incorrectly returned invalidBlockGrandChild " +
"is an orphan\n")
}
invalidBlockGrandChildNode := dag.index.LookupNode(invalidBlockGrandChild.Hash())
if invalidBlockGrandChildNode == nil {
invalidBlockGrandChildNode, ok := dag.index.LookupNode(invalidBlockGrandChild.Hash())
if !ok {
t.Fatalf("invalidBlockGrandChild wasn't added to the block index as expected")
}
if invalidBlockGrandChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
@ -1117,3 +1132,277 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) {
}
}
}
func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *wire.MsgBlock, expectedRuleErr error) {
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
err = checkRuleError(err, expectedRuleErr)
if err != nil {
t.Errorf("checkRuleError: %s", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block got unexpectedly orphaned")
}
}
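// Example call (an illustrative sketch; block construction is elided, and
// passing nil as expectedRuleErr asserts that processing succeeds):
//
//	testProcessBlockRuleError(t, dag, someBlock, ruleError(ErrMissingTxOut, ""))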
func TestDoubleSpends(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestDoubleSpends", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}
defer teardownFunc()
fundingBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{params.GenesisHash}, nil)
cbTx := fundingBlock.Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}
tx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
doubleSpendTxOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(2),
}
doubleSpendTx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{doubleSpendTxOut})
blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{tx1})
// Check that a block will be rejected if it has a transaction that already exists in its past.
anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1.
anotherBlockWithTx1.Transactions = append(anotherBlockWithTx1.Transactions, tx1)
anotherBlockWithTx1UtilTxs := make([]*util.Tx, len(anotherBlockWithTx1.Transactions))
for i, tx := range anotherBlockWithTx1.Transactions {
anotherBlockWithTx1UtilTxs[i] = util.NewTx(tx)
}
anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, ""))
// Check that a block will be rejected if it has a transaction that double spends
// a transaction from its past.
blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a transaction that double spends the block past.
blockWithDoubleSpendForTx1.Transactions = append(blockWithDoubleSpendForTx1.Transactions, doubleSpendTx1)
blockWithDoubleSpendForTx1UtilTxs := make([]*util.Tx, len(blockWithDoubleSpendForTx1.Transactions))
for i, tx := range blockWithDoubleSpendForTx1.Transactions {
blockWithDoubleSpendForTx1UtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, ""))
blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{doubleSpendTx1})
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Check that a block will not get rejected if it has a transaction that double spends
// a transaction from its anticone.
testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil)
// Check that a block will be rejected if it has two transactions that spend the same UTXO.
blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 and doubleSpendTx1.
blockWithDoubleSpendWithItself.Transactions = append(blockWithDoubleSpendWithItself.Transactions, tx1, doubleSpendTx1)
blockWithDoubleSpendWithItselfUtilTxs := make([]*util.Tx, len(blockWithDoubleSpendWithItself.Transactions))
for i, tx := range blockWithDoubleSpendWithItself.Transactions {
blockWithDoubleSpendWithItselfUtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
// Check that a block will be rejected if it has the same transaction twice.
blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 twice.
blockWithDuplicateTransaction.Transactions = append(blockWithDuplicateTransaction.Transactions, tx1, tx1)
blockWithDuplicateTransactionUtilTxs := make([]*util.Tx, len(blockWithDuplicateTransaction.Transactions))
for i, tx := range blockWithDuplicateTransaction.Transactions {
blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx)
}
blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
}
func TestUTXOCommitment(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestUTXOCommitment: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
resetExtraNonceForTest()
createTx := func(txToSpend *wire.MsgTx) *wire.MsgTx {
scriptPubKey, err := txscript.PayToScriptHashScript(OpTrueScript)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build script pub key: %s", err)
}
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *txToSpend.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
return wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
}
// Build the following DAG:
// G <- A <- B <- D
// <- C <-
genesis := params.GenesisBlock
// Block A:
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
// Block B:
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
// Block C:
txSpendBlockACoinbase := createTx(blockA.Transactions[0])
blockCTxs := []*wire.MsgTx{txSpendBlockACoinbase}
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, blockCTxs)
// Block D:
txSpendTxInBlockC := createTx(txSpendBlockACoinbase)
blockDTxs := []*wire.MsgTx{txSpendTxInBlockC}
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs)
// Get the pastUTXO of blockD
blockNodeD, ok := dag.index.LookupNode(blockD.BlockHash())
if !ok {
t.Fatalf("TestUTXOCommitment: blockNode for block D not found")
}
blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD)
blockDPastDiffUTXOSet := blockDPastUTXO.(*DiffUTXOSet)
// Build a Multiset for block D
multiset := secp256k1.NewMultiset()
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toAdd {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toRemove {
var err error
multiset, err = removeUTXOFromMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: removeUTXOFromMultiset unexpectedly failed")
}
}
// Turn the multiset into a UTXO commitment
utxoCommitment := daghash.Hash(*multiset.Finalize())
// Make sure that the two commitments are equal
if !utxoCommitment.IsEqual(blockNodeD.utxoCommitment) {
t.Fatalf("TestUTXOCommitment: calculated UTXO commitment and "+
"actual UTXO commitment don't match. Want: %s, got: %s",
utxoCommitment, blockNodeD.utxoCommitment)
}
}
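// The assertion above leans on the commutativity of the multiset: adding the
// base collection and the toAdd entries and removing the toRemove entries
// reproduces the header's UTXO commitment regardless of the order in which
// the outpoints are visited (Go map iteration order is randomized).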
func TestPastUTXOMultiSet(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestPastUTXOMultiSet", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
// Build a short chain
genesis := params.GenesisBlock
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil)
// Take blockC's selectedParentMultiset
blockNodeC, ok := dag.index.LookupNode(blockC.BlockHash())
if !ok {
t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found")
}
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Copy the multiset
blockCSelectedParentMultisetCopy := *blockCSelectedParentMultiset
blockCSelectedParentMultiset = &blockCSelectedParentMultisetCopy
// Add a block on top of blockC
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil)
// Get blockC's selectedParentMultiset again
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Make sure that blockC's selectedParentMultiset had not changed
if !reflect.DeepEqual(blockCSelectedParentMultiset, blockCSelectedParentMultiSetAfterAnotherBlock) {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed")
}
}

View File

@ -10,7 +10,6 @@ import (
"encoding/json"
"fmt"
"io"
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
@ -18,7 +17,6 @@ import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/buffers"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
@ -46,14 +44,6 @@ func isNotInDAGErr(err error) bool {
return errors.As(err, &notInDAGErr)
}
// outpointKeyPool defines a concurrent safe free list of byte buffers used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
New: func() interface{} {
return &bytes.Buffer{} // Pointer to a buffer to avoid boxing alloc.
},
}
// outpointIndexByteOrder is the byte order for serializing the outpoint index.
// It uses big endian to ensure that when outpoint is used as database key, the
// keys will be iterated in an ascending order by the outpoint index.
@ -91,42 +81,41 @@ func deserializeOutpoint(r io.Reader) (*wire.Outpoint, error) {
// updateUTXOSet updates the UTXO set in the database based on the provided
// UTXO diff.
func updateUTXOSet(dbContext dbaccess.Context, virtualUTXODiff *UTXODiff) error {
outpointBuff := bytes.NewBuffer(make([]byte, outpointSerializeSize))
for outpoint := range virtualUTXODiff.toRemove {
w := outpointKeyPool.Get().(*bytes.Buffer)
w.Reset()
err := serializeOutpoint(w, &outpoint)
outpointBuff.Reset()
err := serializeOutpoint(outpointBuff, &outpoint)
if err != nil {
return err
}
key := w.Bytes()
key := outpointBuff.Bytes()
err = dbaccess.RemoveFromUTXOSet(dbContext, key)
if err != nil {
return err
}
outpointKeyPool.Put(w)
}
// We are preallocating for P2PKH entries because they are the most common ones.
// If we have entries with a compressed script bigger than P2PKH's, the buffer will grow.
bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(virtualUTXODiff.toAdd)
buff := bytes.NewBuffer(make([]byte, bytesToPreallocate))
utxoEntryBuff := bytes.NewBuffer(make([]byte, p2pkhUTXOEntrySerializeSize))
for outpoint, entry := range virtualUTXODiff.toAdd {
utxoEntryBuff.Reset()
outpointBuff.Reset()
// Serialize and store the UTXO entry.
sBuff := buffers.NewSubBuffer(buff)
err := serializeUTXOEntry(sBuff, entry)
err := serializeUTXOEntry(utxoEntryBuff, entry)
if err != nil {
return err
}
serializedEntry := sBuff.Bytes()
serializedEntry := utxoEntryBuff.Bytes()
sBuff = buffers.NewSubBuffer(buff)
err = serializeOutpoint(sBuff, &outpoint)
err = serializeOutpoint(outpointBuff, &outpoint)
if err != nil {
return err
}
key := sBuff.Bytes()
key := outpointBuff.Bytes()
err = dbaccess.AddToUTXOSet(dbContext, key, serializedEntry)
if err != nil {
return err
@ -201,28 +190,92 @@ func (dag *BlockDAG) initDAGState() error {
if err != nil {
return err
}
if !dagState.LocalSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting kaspad with --reset-db flag", dag.subnetworkID, dagState.LocalSubnetworkID)
err = dag.validateLocalSubnetworkID(dagState)
if err != nil {
return err
}
log.Debugf("Loading block index...")
var unprocessedBlockNodes []*blockNode
blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx())
unprocessedBlockNodes, err := dag.initBlockIndex()
if err != nil {
return err
}
log.Debugf("Loading UTXO set...")
fullUTXOCollection, err := dag.initUTXOSet()
if err != nil {
return err
}
log.Debugf("Loading reachability data...")
err = dag.reachabilityTree.init(dbaccess.NoTx())
if err != nil {
return err
}
log.Debugf("Loading multiset data...")
err = dag.multisetStore.init(dbaccess.NoTx())
if err != nil {
return err
}
log.Debugf("Applying the loaded utxoCollection to the virtual block...")
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
return errors.Wrap(err, "Error loading UTXOSet")
}
log.Debugf("Applying the stored tips to the virtual block...")
err = dag.initVirtualBlockTips(dagState)
if err != nil {
return err
}
log.Debugf("Setting the last finality point...")
var ok bool
dag.lastFinalityPoint, ok = dag.index.LookupNode(dagState.LastFinalityPoint)
if !ok {
return errors.Errorf("finality point block %s "+
"does not exist in the DAG", dagState.LastFinalityPoint)
}
dag.finalizeNodesBelowFinalityPoint(false)
log.Debugf("Processing unprocessed blockNodes...")
err = dag.processUnprocessedBlockNodes(unprocessedBlockNodes)
if err != nil {
return err
}
log.Infof("DAG state initialized.")
return nil
}
func (dag *BlockDAG) validateLocalSubnetworkID(state *dagState) error {
if !state.LocalSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting kaspad with --reset-db flag", dag.subnetworkID, state.LocalSubnetworkID)
}
return nil
}
func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err error) {
blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx())
if err != nil {
return nil, err
}
defer blockIndexCursor.Close()
for blockIndexCursor.Next() {
serializedDBNode, err := blockIndexCursor.Value()
if err != nil {
return err
return nil, err
}
node, err := dag.deserializeBlockNode(serializedDBNode)
if err != nil {
return err
return nil, err
}
// Check to see if this node had been stored in the block DB
@ -241,14 +294,14 @@ func (dag *BlockDAG) initDAGState() error {
if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
return nil, errors.Errorf("Expected "+
"first entry in block index to be genesis block, "+
"found %s", node.hash))
"found %s", node.hash)
}
} else {
if len(node.parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"has no parents but it's not the genesis block", node.hash))
return nil, errors.Errorf("block %s "+
"has no parents but it's not the genesis block", node.hash)
}
}
@ -259,12 +312,14 @@ func (dag *BlockDAG) initDAGState() error {
dag.blockCount++
}
return unprocessedBlockNodes, nil
}
log.Debugf("Loading UTXO set...")
fullUTXOCollection := make(utxoCollection)
func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error) {
fullUTXOCollection = make(utxoCollection)
cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx())
if err != nil {
return err
return nil, err
}
defer cursor.Close()
@ -272,72 +327,55 @@ func (dag *BlockDAG) initDAGState() error {
// Deserialize the outpoint
key, err := cursor.Key()
if err != nil {
return err
return nil, err
}
outpoint, err := deserializeOutpoint(bytes.NewReader(key))
outpoint, err := deserializeOutpoint(bytes.NewReader(key.Suffix()))
if err != nil {
return err
return nil, err
}
// Deserialize the utxo entry
value, err := cursor.Value()
if err != nil {
return err
return nil, err
}
entry, err := deserializeUTXOEntry(bytes.NewReader(value))
if err != nil {
return err
return nil, err
}
fullUTXOCollection[*outpoint] = entry
}
log.Debugf("Loading reachability data...")
err = dag.reachabilityStore.init(dbaccess.NoTx())
if err != nil {
return err
}
return fullUTXOCollection, nil
}
log.Debugf("Loading multiset data...")
err = dag.multisetStore.init(dbaccess.NoTx())
if err != nil {
return err
}
log.Debugf("Applying the loaded utxoCollection to the virtual block...")
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
}
log.Debugf("Applying the stored tips to the virtual block...")
func (dag *BlockDAG) initVirtualBlockTips(state *dagState) error {
tips := newBlockSet()
for _, tipHash := range dagState.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", dagState.TipHashes))
for _, tipHash := range state.TipHashes {
tip, ok := dag.index.LookupNode(tipHash)
if !ok {
return errors.Errorf("cannot find "+
"DAG tip %s in block index", state.TipHashes)
}
tips.add(tip)
}
dag.virtual.SetTips(tips)
return nil
}
log.Debugf("Setting the last finality point...")
dag.lastFinalityPoint = dag.index.LookupNode(dagState.LastFinalityPoint)
dag.finalizeNodesBelowFinalityPoint(false)
log.Debugf("Processing unprocessed blockNodes...")
func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*blockNode) error {
for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted.
blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
"for block %s failed: %s", node.hash, err))
return errors.Wrapf(err, "HasBlock "+
"for block %s failed: %s", node.hash, err)
}
if !blockExists {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"exists in block index but not in block db", node.hash))
return errors.Errorf("block %s "+
"exists in block index but not in block db", node.hash)
}
// Attempt to accept the block.
@ -355,19 +393,16 @@ func (dag *BlockDAG) initDAGState() error {
// If the block is an orphan or is delayed then it couldn't have
// possibly been written to the block index in the first place.
if isOrphan {
return AssertError(fmt.Sprintf("Block %s, which was not "+
return errors.Errorf("Block %s, which was not "+
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
"impossible.", node.hash)
}
if isDelayed {
return AssertError(fmt.Sprintf("Block %s, which was not "+
return errors.Errorf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
"impossible.", node.hash)
}
}
log.Infof("DAG state initialized.")
return nil
}
@ -396,10 +431,10 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
node.parents = newBlockSet()
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
parent, ok := dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("deserializeBlockNode: Could "+
"not find parent %s for block %s", hash, header.BlockHash())
}
node.parents.add(parent)
}
@ -417,7 +452,11 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
// Because genesis doesn't have selected parent, it's serialized as zero hash
if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
node.selectedParent = dag.index.LookupNode(selectedParentHash)
var ok bool
node.selectedParent, ok = dag.index.LookupNode(selectedParentHash)
if !ok {
return nil, errors.Errorf("block %s does not exist in the DAG", selectedParentHash)
}
}
node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
@ -436,7 +475,12 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
return nil, err
}
node.blues[i] = dag.index.LookupNode(hash)
var ok bool
node.blues[i], ok = dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("block %s does not exist in the DAG", selectedParentHash)
}
}
bluesAnticoneSizesLen, err := wire.ReadVarInt(buffer)
@ -454,8 +498,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
if err != nil {
return nil, err
}
blue := dag.index.LookupNode(hash)
if blue == nil {
blue, ok := dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("couldn't find block with hash %s", hash)
}
node.bluesAnticoneSizes[blue] = dagconfig.KType(bluesAnticoneSize)
@ -560,8 +604,8 @@ func blockHashFromBlockIndexKey(BlockIndexKey []byte) (*daghash.Hash, error) {
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
// Lookup the block hash in block index and ensure it is in the DAG
node := dag.index.LookupNode(hash)
if node == nil {
node, ok := dag.index.LookupNode(hash)
if !ok {
str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str)
}
@ -609,7 +653,7 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha
if err != nil {
return nil, err
}
blockHash, err := blockHashFromBlockIndexKey(key)
blockHash, err := blockHashFromBlockIndexKey(key.Suffix())
if err != nil {
return nil, err
}

View File

@ -66,7 +66,7 @@ func TestUTXOSerialization(t *testing.T) {
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("030000000000000000f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
},
{
name: "blue score 100001, not coinbase",
@ -76,7 +76,7 @@ func TestUTXOSerialization(t *testing.T) {
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("420d03000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
},
}

View File

@ -5,7 +5,7 @@
package blockdag
import (
"math/big"
"github.com/kaspanet/kaspad/util/bigintpool"
"time"
"github.com/kaspanet/kaspad/util"
@ -30,11 +30,20 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
// averageWindowTarget * (windowMinTimestamp / (targetTimePerBlock * windowSize))
// The result uses integer division which means it will be slightly
// rounded down.
newTarget := targetsWindow.averageTarget()
newTarget := bigintpool.Acquire(0)
defer bigintpool.Release(newTarget)
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
defer bigintpool.Release(windowTimeStampDifference)
targetTimePerBlock := bigintpool.Acquire(dag.targetTimePerBlock)
defer bigintpool.Release(targetTimePerBlock)
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
defer bigintpool.Release(difficultyAdjustmentWindowSize)
targetsWindow.averageTarget(newTarget)
newTarget.
Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
Mul(newTarget, windowTimeStampDifference).
Div(newTarget, targetTimePerBlock).
Div(newTarget, difficultyAdjustmentWindowSize)
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
return dag.powMaxBits
}
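// Hedged sketch of the pool pattern used above (assumption: bigintpool.Acquire
// returns a *big.Int set to the given int64 and bigintpool.Release returns it
// to an internal pool for reuse):
//
//	x := bigintpool.Acquire(42)
//	defer bigintpool.Release(x)
//	x.Mul(x, x) // use x like any other *big.Int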

View File

@ -114,7 +114,11 @@ func TestDifficulty(t *testing.T) {
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return dag.index.LookupNode(block.BlockHash())
node, ok := dag.index.LookupNode(block.BlockHash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", block.BlockHash())
}
return node
}
tip := dag.genesis
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {

View File

@ -6,28 +6,10 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
)
// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// ErrorCode identifies a kind of error.
type ErrorCode int
@ -87,6 +69,9 @@ const (
// the expected value.
ErrBadUTXOCommitment
// ErrInvalidSubnetwork indicates the subnetwork is not allowed.
ErrInvalidSubnetwork
// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
// last finality point.
ErrFinalityPointTimeTooOld
@ -121,6 +106,11 @@ const (
// either does not exist or has already been spent.
ErrMissingTxOut
// ErrDoubleSpendInSameBlock indicates a transaction
// that spends an output that was already spent by another
// transaction in the same block.
ErrDoubleSpendInSameBlock
// ErrUnfinalizedTx indicates a transaction has not been finalized.
// A valid block may only contain finalized transactions.
ErrUnfinalizedTx
@ -245,6 +235,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
@ -294,7 +285,6 @@ func (e RuleError) Error() string {
return e.Description
}
// ruleError creates an RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
func ruleError(c ErrorCode, desc string) error {
return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
}
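// Hedged usage sketch (not part of the original change): since ruleError now
// wraps RuleError with errors.WithStack, callers should unwrap with errors.As
// rather than a direct type assertion:
//
//	var ruleErr RuleError
//	if errors.As(err, &ruleErr) && ruleErr.ErrorCode == ErrMissingTxOut {
//		// handle the missing-output violation
//	}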

View File

@ -5,7 +5,6 @@
package blockdag
import (
"fmt"
"testing"
)
@ -99,46 +98,3 @@ func TestRuleError(t *testing.T) {
}
}
}
// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
t.Parallel()
tests := []struct {
in DeploymentError
want string
}{
{
DeploymentError(0),
"deployment ID 0 does not exist",
},
{
DeploymentError(10),
"deployment ID 10 does not exist",
},
{
DeploymentError(123),
"deployment ID 123 does not exist",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
func TestAssertError(t *testing.T) {
message := "abc 123"
err := AssertError(message)
expectedMessage := fmt.Sprintf("assertion failed: %s", message)
if expectedMessage != err.Error() {
t.Errorf("Unexpected AssertError message. "+
"Got: %s, want: %s", err.Error(), expectedMessage)
}
}

View File

@ -186,6 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
params.EnableNonNativeSubnetworks = true
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
@ -410,6 +411,7 @@ func TestGasLimit(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
params.EnableNonNativeSubnetworks = true
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})

View File

@ -57,7 +57,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// newNode is always in the future of blueCandidate, so there's
// no point in checking it.
if chainBlock != newNode {
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil {
if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
return nil, err
} else if isAncestorOfBlueCandidate {
break
@ -66,7 +66,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
for _, block := range chainBlock.blues {
// Skip blocks that exist in the past of blueCandidate.
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil {
if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
return nil, err
} else if isAncestorOfBlueCandidate {
continue
@ -148,7 +148,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
continue
}
isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent)
isAncestorOfSelectedParent, err := dag.isInPast(parent, node.selectedParent)
if err != nil {
return nil, err
}

View File

@ -215,7 +215,10 @@ func TestGHOSTDAG(t *testing.T) {
t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
}
node := dag.index.LookupNode(utilBlock.Hash())
node, ok := dag.index.LookupNode(utilBlock.Hash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
}
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id
@ -293,20 +296,27 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
currentBlockB = newBlock
}
// Get references to the tips of the two chains
blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())
blockNodeA, ok := dag.index.LookupNode(currentBlockA.BlockHash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", currentBlockA.BlockHash())
}
blockNodeB, ok := dag.index.LookupNode(currentBlockB.BlockHash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", currentBlockB.BlockHash())
}
// Try getting the blueAnticoneSize between them. Since the two
// blocks are not in the anticones of each other, this should fail.
@ -332,14 +342,14 @@ func TestGHOSTDAGErrors(t *testing.T) {
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlock(t, dag, block1, block2)
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{}
dbTx, err := dbaccess.NewTx()
if err != nil {
@ -359,12 +369,15 @@ func TestGHOSTDAGErrors(t *testing.T) {
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
// reachability data, so we expect it to fail.
blockNode3 := dag.index.LookupNode(block3.BlockHash())
blockNode3, ok := dag.index.LookupNode(block3.BlockHash())
if !ok {
t.Fatalf("block %s does not exist in the DAG", block3.BlockHash())
}
_, err = dag.ghostdag(blockNode3)
if err == nil {
t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
}
expectedErrSubstring := "Couldn't find reachability data"
expectedErrSubstring := "couldn't find reachability data"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)

View File

@ -59,13 +59,13 @@ func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) recover() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
return idx.dag.ForEachHash(func(hash daghash.Hash) error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = idx.dag.ForEachHash(func(hash daghash.Hash) error {
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
if err != nil {
return err
@ -77,13 +77,13 @@ func (idx *AcceptanceIndex) recover() error {
if err != nil {
return err
}
return idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
})
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
if err != nil {
return err
}
return dbTx.Commit()
return dbTx.Commit()
})
}
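The recover refactor above opens, uses, and commits a short-lived database transaction per visited hash, instead of holding one transaction across the whole iteration. The general shape of that pattern, sketched with only the dbaccess calls that appear in this diff (the iteration helper and item type are illustrative):
err := forEachItem(func(item *Item) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()
	// ... per-item reads and writes against dbTx ...
	return dbTx.Commit()
})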
// ConnectBlock is invoked by the index manager when a new block has been

View File

@ -38,7 +38,7 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
msgBlock.AddTransaction(tx.MsgTx())
}
multiset, err := dag.NextBlockMultiset(transactions)
multiset, err := dag.NextBlockMultiset()
if err != nil {
return nil, err
}
@ -57,16 +57,16 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
}
// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips, with the given transactions.
// built on top of the current tips.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset(transactions []*util.Tx) (*secp256k1.MultiSet, error) {
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, err
}
return dag.virtual.blockNode.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
}
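A hedged illustration of a call site honoring the locking contract stated above, using the read-lock methods that appear elsewhere in this package:
dag.RLock()
multiset, err := dag.NextBlockMultiset()
dag.RUnlock()
if err != nil {
	// handle the error
}
_ = multiset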
// CoinbasePayloadExtraData returns coinbase payload extra data parameter

View File

@ -96,7 +96,7 @@ func (store *multisetStore) init(dbContext dbaccess.Context) error {
return err
}
hash, err := daghash.NewHash(key)
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}

View File

@ -253,6 +253,8 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
}
}
dag.addBlockProcessingTimestamp()
log.Debugf("Accepted block %s", blockHash)
return false, false, nil

View File

@ -63,8 +63,8 @@ func TestProcessOrphans(t *testing.T) {
}
// Make sure that the child block had been rejected
node := dag.index.LookupNode(childBlock.Hash())
if node == nil {
node, ok := dag.index.LookupNode(childBlock.Hash())
if !ok {
t.Fatalf("TestProcessOrphans: child block missing from block index")
}
if !dag.index.NodeStatus(node).KnownInvalid() {

File diff suppressed because it is too large

View File

@ -1,6 +1,8 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
"strings"
"testing"
@ -11,19 +13,20 @@ func TestAddChild(t *testing.T) {
// root -> a -> b -> c...
// Create the root node of a new reachability tree
root := newReachabilityTreeNode(&blockNode{})
root.setInterval(newReachabilityInterval(1, 100))
root.interval = newReachabilityInterval(1, 100)
// Add a chain of child nodes just before a reindex occurs (2^6=64 < 100)
currentTip := root
for i := 0; i < 6; i++ {
node := newReachabilityTreeNode(&blockNode{})
modifiedNodes, err := currentTip.addChild(node)
modifiedNodes := newModifiedTreeNodes()
err := currentTip.addChild(node, root, modifiedNodes)
if err != nil {
t.Fatalf("TestAddChild: addChild failed: %s", err)
}
// Expect only the node and its parent to be affected
expectedModifiedNodes := []*reachabilityTreeNode{currentTip, node}
expectedModifiedNodes := newModifiedTreeNodes(currentTip, node)
if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
@ -34,7 +37,8 @@ func TestAddChild(t *testing.T) {
// Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128)
lastChild := newReachabilityTreeNode(&blockNode{})
modifiedNodes, err := currentTip.addChild(lastChild)
modifiedNodes := newModifiedTreeNodes()
err := currentTip.addChild(lastChild, root, modifiedNodes)
if err != nil {
t.Fatalf("TestAddChild: addChild failed: %s", err)
}
@ -45,19 +49,23 @@ func TestAddChild(t *testing.T) {
t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
}
// Expect the tip to have an interval of 1 and remaining interval of 0
// Expect the tip to have an interval of 1 and remaining interval of 0 both before and after
tipInterval := lastChild.interval.size()
if tipInterval != 1 {
t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipInterval)
}
tipRemainingInterval := lastChild.remainingInterval.size()
if tipRemainingInterval != 0 {
t.Fatalf("TestAddChild: unexpected tip interval size: want: 0, got: %d", tipRemainingInterval)
tipRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
if tipRemainingIntervalBefore != 0 {
t.Fatalf("TestAddChild: unexpected tip interval before size: want: 0, got: %d", tipRemainingIntervalBefore)
}
tipRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
if tipRemainingIntervalAfter != 0 {
t.Fatalf("TestAddChild: unexpected tip interval after size: want: 0, got: %d", tipRemainingIntervalAfter)
}
// Expect all nodes to be descendant nodes of root
currentNode := currentTip
for currentNode != nil {
for currentNode != root {
if !root.isAncestorOf(currentNode) {
t.Fatalf("TestAddChild: currentNode is not a descendant of root")
}
@ -68,19 +76,20 @@ func TestAddChild(t *testing.T) {
// root -> a, b, c...
// Create the root node of a new reachability tree
root = newReachabilityTreeNode(&blockNode{})
root.setInterval(newReachabilityInterval(1, 100))
root.interval = newReachabilityInterval(1, 100)
// Add child nodes to root just before a reindex occurs (2^6=64 < 100)
childNodes := make([]*reachabilityTreeNode, 6)
for i := 0; i < len(childNodes); i++ {
childNodes[i] = newReachabilityTreeNode(&blockNode{})
modifiedNodes, err := root.addChild(childNodes[i])
modifiedNodes := newModifiedTreeNodes()
err := root.addChild(childNodes[i], root, modifiedNodes)
if err != nil {
t.Fatalf("TestAddChild: addChild failed: %s", err)
}
// Expect only the node and the root to be affected
expectedModifiedNodes := []*reachabilityTreeNode{root, childNodes[i]}
expectedModifiedNodes := newModifiedTreeNodes(root, childNodes[i])
if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
@ -89,7 +98,8 @@ func TestAddChild(t *testing.T) {
// Add another node to the root to trigger a reindex (100 < 2^7=128)
lastChild = newReachabilityTreeNode(&blockNode{})
modifiedNodes, err = root.addChild(lastChild)
modifiedNodes = newModifiedTreeNodes()
err = root.addChild(lastChild, root, modifiedNodes)
if err != nil {
t.Fatalf("TestAddChild: addChild failed: %s", err)
}
@ -100,14 +110,18 @@ func TestAddChild(t *testing.T) {
t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.")
}
// Expect the last-added child to have an interval of 1 and remaining interval of 0
// Expect the last-added child to have an interval of 1 and remaining interval of 0 both before and after
lastChildInterval := lastChild.interval.size()
if lastChildInterval != 1 {
t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", lastChildInterval)
}
lastChildRemainingInterval := lastChild.remainingInterval.size()
if lastChildRemainingInterval != 0 {
t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 0, got: %d", lastChildRemainingInterval)
lastChildRemainingIntervalBefore := lastChild.remainingIntervalBefore().size()
if lastChildRemainingIntervalBefore != 0 {
t.Fatalf("TestAddChild: unexpected lastChild interval before size: want: 0, got: %d", lastChildRemainingIntervalBefore)
}
lastChildRemainingIntervalAfter := lastChild.remainingIntervalAfter().size()
if lastChildRemainingIntervalAfter != 0 {
t.Fatalf("TestAddChild: unexpected lastChild interval after size: want: 0, got: %d", lastChildRemainingIntervalAfter)
}
// Expect all nodes to be descendant nodes of root
@ -118,6 +132,91 @@ func TestAddChild(t *testing.T) {
}
}
func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) {
root := newReachabilityTreeNode(&blockNode{})
currentTip := root
const numberOfDescendants = 6
descendants := make([]*reachabilityTreeNode, numberOfDescendants)
for i := 0; i < numberOfDescendants; i++ {
node := newReachabilityTreeNode(&blockNode{})
err := currentTip.addChild(node, root, newModifiedTreeNodes())
if err != nil {
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err)
}
descendants[i] = node
currentTip = node
}
// Expect all descendants to be in the future of root
for _, node := range descendants {
if !root.isAncestorOf(node) {
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: node is not a descendant of root")
}
}
if !root.isAncestorOf(root) {
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root")
}
}
func TestIntervalContains(t *testing.T) {
tests := []struct {
name string
this, other *reachabilityInterval
thisContainsOther bool
}{
{
name: "this == other",
this: newReachabilityInterval(10, 100),
other: newReachabilityInterval(10, 100),
thisContainsOther: true,
},
{
name: "this.start == other.start && this.end < other.end",
this: newReachabilityInterval(10, 90),
other: newReachabilityInterval(10, 100),
thisContainsOther: false,
},
{
name: "this.start == other.start && this.end > other.end",
this: newReachabilityInterval(10, 100),
other: newReachabilityInterval(10, 90),
thisContainsOther: true,
},
{
name: "this.start > other.start && this.end == other.end",
this: newReachabilityInterval(20, 100),
other: newReachabilityInterval(10, 100),
thisContainsOther: false,
},
{
name: "this.start < other.start && this.end == other.end",
this: newReachabilityInterval(10, 100),
other: newReachabilityInterval(20, 100),
thisContainsOther: true,
},
{
name: "this.start > other.start && this.end < other.end",
this: newReachabilityInterval(20, 90),
other: newReachabilityInterval(10, 100),
thisContainsOther: false,
},
{
name: "this.start < other.start && this.end > other.end",
this: newReachabilityInterval(10, 100),
other: newReachabilityInterval(20, 90),
thisContainsOther: true,
},
}
for _, test := range tests {
if thisContainsOther := test.this.contains(test.other); thisContainsOther != test.thisContainsOther {
t.Errorf("test.this.contains(test.other) is expected to be %t but got %t",
test.thisContainsOther, thisContainsOther)
}
}
}
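The cases above pin down inclusive containment: this contains other exactly when this.start <= other.start and other.end <= this.end. A sketch consistent with that table (the start and end field names are assumptions):
func (ri *reachabilityInterval) containsSketch(other *reachabilityInterval) bool {
	return ri.start <= other.start && other.end <= ri.end
}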
func TestSplitFraction(t *testing.T) {
tests := []struct {
interval *reachabilityInterval
@ -346,140 +445,140 @@ func TestSplitWithExponentialBias(t *testing.T) {
}
}
func TestIsInFuture(t *testing.T) {
blocks := futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
func TestHasAncestorOf(t *testing.T) {
treeNodes := futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(2, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
}
tests := []struct {
block *futureCoveringBlock
treeNode *reachabilityTreeNode
expectedResult bool
}{
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)},
expectedResult: false,
},
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)},
expectedResult: true,
},
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)},
expectedResult: true,
},
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)},
expectedResult: false,
},
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)},
expectedResult: false,
},
{
block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}},
treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)},
expectedResult: true,
},
}
for i, test := range tests {
result := blocks.isInFuture(test.block)
result := treeNodes.hasAncestorOf(test.treeNode)
if result != test.expectedResult {
t.Errorf("TestIsInFuture: unexpected result in test #%d. Want: %t, got: %t",
t.Errorf("TestHasAncestorOf: unexpected result in test #%d. Want: %t, got: %t",
i, test.expectedResult, result)
}
}
}
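Consistent with the expectations above, hasAncestorOf answers whether some tree node in the set is an ancestor of the given node, with ancestry meaning inclusive interval containment. A naive sketch of that semantics (the real implementation can binary-search the ordered set):
func (s futureCoveringTreeNodeSet) hasAncestorOfSketch(node *reachabilityTreeNode) bool {
	for _, candidate := range s {
		if candidate.isAncestorOf(node) {
			return true
		}
	}
	return false
}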
func TestInsertBlock(t *testing.T) {
blocks := futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
func TestInsertNode(t *testing.T) {
treeNodes := futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
}
tests := []struct {
toInsert []*futureCoveringBlock
expectedResult futureCoveringBlockSet
toInsert []*reachabilityTreeNode
expectedResult futureCoveringTreeNodeSet
}{
{
toInsert: []*futureCoveringBlock{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
toInsert: []*reachabilityTreeNode{
{interval: newReachabilityInterval(5, 7)},
},
expectedResult: futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
expectedResult: futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
},
},
{
toInsert: []*futureCoveringBlock{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
toInsert: []*reachabilityTreeNode{
{interval: newReachabilityInterval(65, 78)},
},
expectedResult: futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
expectedResult: futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(65, 78)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
},
},
{
toInsert: []*futureCoveringBlock{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
toInsert: []*reachabilityTreeNode{
{interval: newReachabilityInterval(88, 97)},
},
expectedResult: futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
expectedResult: futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
&reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
},
},
{
toInsert: []*futureCoveringBlock{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
toInsert: []*reachabilityTreeNode{
{interval: newReachabilityInterval(88, 97)},
{interval: newReachabilityInterval(3000, 3010)},
},
expectedResult: futureCoveringBlockSet{
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
expectedResult: futureCoveringTreeNodeSet{
&reachabilityTreeNode{interval: newReachabilityInterval(1, 3)},
&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
&reachabilityTreeNode{interval: newReachabilityInterval(88, 97)},
&reachabilityTreeNode{interval: newReachabilityInterval(657, 789)},
&reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)},
&reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)},
&reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)},
},
},
}
for i, test := range tests {
// Create a clone of blocks so that we have a clean start for every test
blocksClone := make(futureCoveringBlockSet, len(blocks))
for i, block := range blocks {
blocksClone[i] = block
// Create a clone of treeNodes so that we have a clean start for every test
treeNodesClone := make(futureCoveringTreeNodeSet, len(treeNodes))
for i, treeNode := range treeNodes {
treeNodesClone[i] = treeNode
}
for _, block := range test.toInsert {
blocksClone.insertBlock(block)
for _, treeNode := range test.toInsert {
treeNodesClone.insertNode(treeNode)
}
if !reflect.DeepEqual(blocksClone, test.expectedResult) {
t.Errorf("TestInsertBlock: unexpected result in test #%d. Want: %s, got: %s",
i, test.expectedResult, blocksClone)
if !reflect.DeepEqual(treeNodesClone, test.expectedResult) {
t.Errorf("TestInsertNode: unexpected result in test #%d. Want: %s, got: %s",
i, test.expectedResult, treeNodesClone)
}
}
}
@ -580,14 +679,14 @@ func TestSplitWithExponentialBiasErrors(t *testing.T) {
func TestReindexIntervalErrors(t *testing.T) {
// Create a treeNode and give it size = 100
treeNode := newReachabilityTreeNode(&blockNode{})
treeNode.setInterval(newReachabilityInterval(0, 99))
treeNode.interval = newReachabilityInterval(0, 99)
// Add a chain of 100 child treeNodes to treeNode
var err error
currentTreeNode := treeNode
for i := 0; i < 100; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err = currentTreeNode.addChild(childTreeNode)
err = currentTreeNode.addChild(childTreeNode, treeNode, newModifiedTreeNodes())
if err != nil {
break
}
@ -619,12 +718,12 @@ func BenchmarkReindexInterval(b *testing.B) {
// its first child gets half of the interval, so a reindex
// from the root should happen after adding subTreeSize
// nodes.
root.setInterval(newReachabilityInterval(0, subTreeSize*2))
root.interval = newReachabilityInterval(0, subTreeSize*2)
currentTreeNode := root
for i := 0; i < subTreeSize; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err := currentTreeNode.addChild(childTreeNode)
err := currentTreeNode.addChild(childTreeNode, root, newModifiedTreeNodes())
if err != nil {
b.Fatalf("addChild: %s", err)
}
@ -632,50 +731,47 @@ func BenchmarkReindexInterval(b *testing.B) {
currentTreeNode = childTreeNode
}
remainingIntervalBefore := *root.remainingInterval
originalRemainingInterval := *root.remainingIntervalAfter()
// After we added subTreeSize nodes, adding the next
// node should lead to a reindex from root.
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
b.StartTimer()
_, err := currentTreeNode.addChild(fullReindexTriggeringNode)
err := currentTreeNode.addChild(fullReindexTriggeringNode, root, newModifiedTreeNodes())
b.StopTimer()
if err != nil {
b.Fatalf("addChild: %s", err)
}
if *root.remainingInterval == remainingIntervalBefore {
if *root.remainingIntervalAfter() == originalRemainingInterval {
b.Fatal("Expected a reindex from root, but it didn't happen")
}
}
}
func TestFutureCoveringBlockSetString(t *testing.T) {
func TestFutureCoveringTreeNodeSetString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(123, 456))
treeNodeA.interval = newReachabilityInterval(123, 456)
treeNodeB := newReachabilityTreeNode(&blockNode{})
treeNodeB.setInterval(newReachabilityInterval(457, 789))
futureCoveringSet := futureCoveringBlockSet{
&futureCoveringBlock{treeNode: treeNodeA},
&futureCoveringBlock{treeNode: treeNodeB},
}
treeNodeB.interval = newReachabilityInterval(457, 789)
futureCoveringSet := futureCoveringTreeNodeSet{treeNodeA, treeNodeB}
str := futureCoveringSet.String()
expectedStr := "[123,456][457,789]"
if str != expectedStr {
t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+
t.Fatalf("TestFutureCoveringTreeNodeSetString: unexpected "+
"string. Want: %s, got: %s", expectedStr, str)
}
}
func TestReachabilityTreeNodeString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(100, 199))
treeNodeA.interval = newReachabilityInterval(100, 199)
treeNodeB1 := newReachabilityTreeNode(&blockNode{})
treeNodeB1.setInterval(newReachabilityInterval(100, 150))
treeNodeB1.interval = newReachabilityInterval(100, 150)
treeNodeB2 := newReachabilityTreeNode(&blockNode{})
treeNodeB2.setInterval(newReachabilityInterval(150, 199))
treeNodeB2.interval = newReachabilityInterval(150, 199)
treeNodeC := newReachabilityTreeNode(&blockNode{})
treeNodeC.setInterval(newReachabilityInterval(100, 149))
treeNodeC.interval = newReachabilityInterval(100, 149)
treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}
@ -686,3 +782,268 @@ func TestReachabilityTreeNodeString(t *testing.T) {
"string. Want: %s, got: %s", expectedStr, str)
}
}
func TestIsInPast(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestIsInPast", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestIsInPast: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Add a chain of two blocks above the genesis. This will be the
// selected parent chain.
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
// Add another block above the genesis
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
nodeC, ok := dag.index.LookupNode(blockC.BlockHash())
if !ok {
t.Fatalf("TestIsInPast: block C is not in the block index")
}
// Add a block whose parents are the two tips
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, nil)
nodeD, ok := dag.index.LookupNode(blockD.BlockHash())
if !ok {
t.Fatalf("TestIsInPast: block C is not in the block index")
}
// Make sure that node C is in the past of node D
isInPast, err := dag.reachabilityTree.isInPast(nodeC, nodeD)
if err != nil {
t.Fatalf("TestIsInPast: isInPast unexpectedly failed: %s", err)
}
if !isInPast {
t.Fatalf("TestIsInPast: node C is unexpectedly not in the past of node D")
}
}
func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot",
true, Config{DAGParams: &dagconfig.SimnetParams})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set the reindex window to a low number to make this test run fast
originalReachabilityReindexWindow := reachabilityReindexWindow
reachabilityReindexWindow = 10
defer func() {
reachabilityReindexWindow = originalReachabilityReindexWindow
}()
// Add a block on top of the genesis block
chainRootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
// Add a chain of reachabilityReindexWindow blocks above chainRootBlock.
// This should move the reindex root
chainRootBlockTipHash := chainRootBlock.BlockHash()
for i := uint64(0); i < reachabilityReindexWindow; i++ {
chainBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chainRootBlockTipHash}, nil)
chainRootBlockTipHash = chainBlock.BlockHash()
}
// Add another block over genesis
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
}
func TestUpdateReindexRoot(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set the reindex window to a low number to make this test run fast
originalReachabilityReindexWindow := reachabilityReindexWindow
reachabilityReindexWindow = 10
defer func() {
reachabilityReindexWindow = originalReachabilityReindexWindow
}()
// Add two blocks on top of the genesis block
chain1RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
chain2RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
// Add a chain of reachabilityReindexWindow - 1 blocks above chain1RootBlock and
// chain2RootBlock, respectively. This should not move the reindex root
chain1RootBlockTipHash := chain1RootBlock.BlockHash()
chain2RootBlockTipHash := chain2RootBlock.BlockHash()
genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
chain1Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
chain1RootBlockTipHash = chain1Block.BlockHash()
chain2Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain2RootBlockTipHash}, nil)
chain2RootBlockTipHash = chain2Block.BlockHash()
if dag.reachabilityTree.reindexRoot != genesisTreeNode {
t.Fatalf("reindex root unexpectedly moved")
}
}
// Add another block over chain1. This will move the reindex root to chain1RootBlock
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
// Make sure that chain1RootBlock is now the reindex root
chain1RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain1RootBlock.BlockHash())
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
if dag.reachabilityTree.reindexRoot != chain1RootTreeNode {
t.Fatalf("chain1RootBlock is not the reindex root after reindex")
}
// Make sure that tight intervals have been applied to chain2. Since
// we added reachabilityReindexWindow-1 blocks to chain2, the size
// of the interval at its root should be equal to reachabilityReindexWindow
chain2RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain2RootBlock.BlockHash())
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
if chain2RootTreeNode.interval.size() != reachabilityReindexWindow {
t.Fatalf("got unexpected chain2RootNode interval. Want: %d, got: %d",
chain2RootTreeNode.interval.size(), reachabilityReindexWindow)
}
// Make sure that the rest of the interval has been allocated to
// chain1RootNode, minus slack from both sides
expectedChain1RootIntervalSize := genesisTreeNode.interval.size() - 1 -
chain2RootTreeNode.interval.size() - 2*reachabilityReindexSlack
if chain1RootTreeNode.interval.size() != expectedChain1RootIntervalSize {
t.Fatalf("got unexpected chain1RootNode interval. Want: %d, got: %d",
chain1RootTreeNode.interval.size(), expectedChain1RootIntervalSize)
}
}
func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestReindexIntervalsEarlierThanReindexRoot", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set the reindex window and slack to low numbers to make this test
// run fast
originalReachabilityReindexWindow := reachabilityReindexWindow
originalReachabilityReindexSlack := reachabilityReindexSlack
reachabilityReindexWindow = 10
reachabilityReindexSlack = 5
defer func() {
reachabilityReindexWindow = originalReachabilityReindexWindow
reachabilityReindexSlack = originalReachabilityReindexSlack
}()
// Add three children to the genesis: leftBlock, centerBlock, rightBlock
leftBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
centerBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
rightBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
// Add a chain of reachabilityReindexWindow blocks above centerBlock.
// This will move the reindex root to centerBlock
centerTipHash := centerBlock.BlockHash()
for i := uint64(0); i < reachabilityReindexWindow; i++ {
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{centerTipHash}, nil)
centerTipHash = block.BlockHash()
}
// Make sure that centerBlock is now the reindex root
centerTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(centerBlock.BlockHash())
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
if dag.reachabilityTree.reindexRoot != centerTreeNode {
t.Fatalf("centerBlock is not the reindex root after reindex")
}
// Get the current interval for leftBlock. The reindex should have
// resulted in a tight interval there
leftTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(leftBlock.BlockHash())
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
if leftTreeNode.interval.size() != 1 {
t.Fatalf("leftBlock interval not tight after reindex")
}
// Get the current interval for rightBlock. The reindex should have
// resulted in a tight interval there
rightTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(rightBlock.BlockHash())
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
if rightTreeNode.interval.size() != 1 {
t.Fatalf("rightBlock interval not tight after reindex")
}
// Get the current interval for centerBlock. Its interval should be:
// genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
if err != nil {
t.Fatalf("failed to get tree node: %s", err)
}
expectedCenterInterval := genesisTreeNode.interval.size() - 1 -
leftTreeNode.interval.size() - reachabilityReindexSlack -
rightTreeNode.interval.size() - reachabilityReindexSlack
if centerTreeNode.interval.size() != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, centerTreeNode.interval.size())
}
// Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
// Each addition will trigger a lower-than-reindex-root reindex. We
// expect the centerInterval to shrink by 1 each time, but its child
// to remain unaffected
treeChildOfCenterBlock := centerTreeNode.children[0]
treeChildOfCenterBlockOriginalIntervalSize := treeChildOfCenterBlock.interval.size()
leftTipHash := leftBlock.BlockHash()
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{leftTipHash}, nil)
leftTipHash = block.BlockHash()
expectedCenterInterval--
if centerTreeNode.interval.size() != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, centerTreeNode.interval.size())
}
if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
}
}
// Add a chain of reachabilityReindexWindow - 1 blocks above rightBlock.
// Each addition will trigger a lower-than-reindex-root reindex. We
// expect the centerInterval to shrink by 1 each time, but its child
// to remain unaffected
rightTipHash := rightBlock.BlockHash()
for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{rightTipHash}, nil)
rightTipHash = block.BlockHash()
expectedCenterInterval--
if centerTreeNode.interval.size() != expectedCenterInterval {
t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
expectedCenterInterval, centerTreeNode.interval.size())
}
if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
t.Fatalf("the interval of centerBlock's child unexpectedly changed")
}
}
}

View File

@ -12,7 +12,7 @@ import (
type reachabilityData struct {
treeNode *reachabilityTreeNode
futureCoveringSet futureCoveringBlockSet
futureCoveringSet futureCoveringTreeNodeSet
}
type reachabilityStore struct {
@ -41,11 +41,11 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
store.setBlockAsDirty(node.hash)
}
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringTreeNodeSet) error {
// load the reachability data from DB to store.loaded
_, exists := store.reachabilityDataByHash(node.hash)
if !exists {
return reachabilityNotFoundError(node)
return reachabilityNotFoundError(node.hash)
}
store.loaded[*node.hash].futureCoveringSet = futureCoveringSet
@ -57,22 +57,26 @@ func (store *reachabilityStore) setBlockAsDirty(blockHash *daghash.Hash) {
store.dirty[*blockHash] = struct{}{}
}
func reachabilityNotFoundError(node *blockNode) error {
return errors.Errorf("Couldn't find reachability data for block %s", node.hash)
func reachabilityNotFoundError(hash *daghash.Hash) error {
return errors.Errorf("couldn't find reachability data for block %s", hash)
}
func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
reachabilityData, exists := store.reachabilityDataByHash(node.hash)
func (store *reachabilityStore) treeNodeByBlockHash(hash *daghash.Hash) (*reachabilityTreeNode, error) {
reachabilityData, exists := store.reachabilityDataByHash(hash)
if !exists {
return nil, reachabilityNotFoundError(node)
return nil, reachabilityNotFoundError(hash)
}
return reachabilityData.treeNode, nil
}
func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringBlockSet, error) {
func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
return store.treeNodeByBlockHash(node.hash)
}
func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringTreeNodeSet, error) {
reachabilityData, exists := store.reachabilityDataByHash(node.hash)
if !exists {
return nil, reachabilityNotFoundError(node)
return nil, reachabilityNotFoundError(node.hash)
}
return reachabilityData.futureCoveringSet, nil
}
@ -137,7 +141,7 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
return err
}
hash, err := daghash.NewHash(key)
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@ -155,7 +159,7 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return err
}
hash, err := daghash.NewHash(key)
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@ -176,7 +180,10 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
}
// Connect the treeNode with its blockNode
reachabilityData.treeNode.blockNode = store.dag.index.LookupNode(hash)
reachabilityData.treeNode.blockNode, ok = store.dag.index.LookupNode(hash)
if !ok {
return errors.Errorf("block %s does not exist in the DAG", hash)
}
return nil
}
@ -212,12 +219,6 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab
return err
}
// Serialize the remaining interval
err = store.serializeReachabilityInterval(w, treeNode.remainingInterval)
if err != nil {
return err
}
// Serialize the parent
// If this is the genesis block, write the zero hash instead
parentHash := &daghash.ZeroHash
@ -262,16 +263,16 @@ func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, inter
return nil
}
func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringBlockSet) error {
func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringTreeNodeSet) error {
// Serialize the set size
err := wire.WriteVarInt(w, uint64(len(futureCoveringSet)))
if err != nil {
return err
}
// Serialize each block in the set
for _, block := range futureCoveringSet {
err = wire.WriteElement(w, block.blockNode.hash)
// Serialize each node in the set
for _, node := range futureCoveringSet {
err = wire.WriteElement(w, node.blockNode.hash)
if err != nil {
return err
}
@ -308,13 +309,6 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re
}
destination.treeNode.interval = interval
// Deserialize the remaining interval
remainingInterval, err := store.deserializeReachabilityInterval(r)
if err != nil {
return err
}
destination.treeNode.remainingInterval = remainingInterval
// Deserialize the parent
// If this is the zero hash, this node is the genesis and as such doesn't have a parent
parentHash := &daghash.Hash{}
@ -385,25 +379,18 @@ func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destin
}
// Deserialize each block in the set
futureCoveringSet := make(futureCoveringBlockSet, setSize)
futureCoveringSet := make(futureCoveringTreeNodeSet, setSize)
for i := uint64(0); i < setSize; i++ {
blockHash := &daghash.Hash{}
err = wire.ReadElement(r, blockHash)
if err != nil {
return err
}
blockNode := store.dag.index.LookupNode(blockHash)
if blockNode == nil {
return errors.Errorf("blockNode not found for hash %s", blockHash)
}
blockReachabilityData, ok := store.reachabilityDataByHash(blockHash)
if !ok {
return errors.Errorf("block reachability data not found for hash: %s", blockHash)
}
futureCoveringSet[i] = &futureCoveringBlock{
blockNode: blockNode,
treeNode: blockReachabilityData.treeNode,
}
futureCoveringSet[i] = blockReachabilityData.treeNode
}
destination.futureCoveringSet = futureCoveringSet

View File

@ -179,11 +179,6 @@ func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscr
// ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines.
func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
// Don't validate coinbase transaction scripts.
if tx.IsCoinBase() {
return nil
}
// Collect all of the transaction inputs and required information for
// validation.
txIns := tx.MsgTx().TxIn
@ -213,10 +208,6 @@ func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.T
}
txValItems := make([]*txValidateItem, 0, numInputs)
for _, tx := range transactions {
// Skip coinbase transactions.
if tx.IsCoinBase() {
continue
}
for txInIdx, txIn := range tx.MsgTx().TxIn {
txVI := &txValidateItem{
txInIndex: txInIdx,

blockdag/sync_rate.go (new file, 57 lines)
View File

@ -0,0 +1,57 @@
package blockdag
import "time"
const syncRateWindowDuration = 15 * time.Minute
// addBlockProcessingTimestamp records the current time as the latest block processing timestamp, in order to measure the recent sync rate.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) addBlockProcessingTimestamp() {
now := time.Now()
dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
}
// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration
// from dag.recentBlockProcessingTimestamps
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() {
dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
}
func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []time.Time {
minTime := time.Now().Add(-syncRateWindowDuration)
windowStartIndex := len(dag.recentBlockProcessingTimestamps)
for i, processTime := range dag.recentBlockProcessingTimestamps {
if processTime.After(minTime) {
windowStartIndex = i
break
}
}
return dag.recentBlockProcessingTimestamps[windowStartIndex:]
}
// syncRate returns the rate of blocks processed
// during the last syncRateWindowDuration.
func (dag *BlockDAG) syncRate() float64 {
dag.RLock()
defer dag.RUnlock()
return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds()
}
// IsSyncRateBelowThreshold checks whether the sync rate
// is below the expected threshold.
func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
if dag.uptime() < syncRateWindowDuration {
return false
}
return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation
}
func (dag *BlockDAG) uptime() time.Duration {
return time.Now().Sub(dag.startTime)
}
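A worked example of the threshold (illustrative numbers only): with TargetTimePerBlock = 1 second and maxDeviation = 0.05, IsSyncRateBelowThreshold can only return true once uptime exceeds the 15-minute window, and it does so when fewer than 0.05 * 900 = 45 blocks were processed inside that window:
targetTimePerBlock := 1 * time.Second // hypothetical parameter
maxDeviation := 0.05                  // hypothetical parameter
threshold := 1 / targetTimePerBlock.Seconds() * maxDeviation    // 0.05 blocks/second
blocksPerWindow := threshold * syncRateWindowDuration.Seconds() // 45 blocks per 15 minutes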

View File

@ -5,9 +5,11 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/database/ffldb/ldb"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"io"
"io/ioutil"
"os"
@ -15,6 +17,7 @@ import (
"sort"
"strings"
"sync"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
@ -61,6 +64,15 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
}
// We set ldb.Options here to return nil because normally
// the database is initialized with very large caches that
// can make opening/closing the database for every test
// quite heavy.
originalLDBOptions := ldb.Options
ldb.Options = func() *opt.Options {
return nil
}
dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath)
err = dbaccess.Open(dbPath)
@ -74,6 +86,7 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
spawnWaitGroup.Wait()
spawn = realSpawn
dbaccess.Close()
ldb.Options = originalLDBOptions
os.RemoveAll(dbPath)
}
} else {
@ -145,8 +158,8 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
parents := newBlockSet()
for _, hash := range parentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
parent, ok := dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
}
parents.add(parent)
@ -279,6 +292,28 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
return block, nil
}
// PrepareAndProcessBlockForTest prepares a block that points to the given parent
// hashes and process it.
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) *wire.MsgBlock {
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, transactions)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
}
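Typical use in a test (a sketch mirroring the calls elsewhere in this diff): build and process a block on top of the current tips, letting the helper fail the test on any processing error:
block := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
_ = block // use the returned *wire.MsgBlock as needed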
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
extraNonceForTest++

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -8,6 +8,7 @@ import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// ThresholdState define the various threshold states used when voting on
@ -177,9 +178,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
var ok bool
state, ok = cache.Lookup(prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
return ThresholdFailed, errors.Errorf(
"thresholdState: cache lookup failed for %s",
prevNode.hash))
prevNode.hash)
}
}
@ -297,7 +298,7 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
return ThresholdFailed, DeploymentError(deploymentID)
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
}
deployment := &dag.dagParams.Deployments[deploymentID]
@ -335,8 +336,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
}
// No warnings about unknown rules or versions until the DAG is
// current.
if dag.isCurrent() {
// synced.
if dag.isSynced() {
// Warn if a high enough percentage of the last blocks have
// unexpected versions.
bestNode := dag.selectedTip()

View File

@ -14,16 +14,16 @@ type blockUTXODiffData struct {
type utxoDiffStore struct {
dag *BlockDAG
dirty map[daghash.Hash]struct{}
loaded map[daghash.Hash]*blockUTXODiffData
dirty map[*blockNode]struct{}
loaded map[*blockNode]*blockUTXODiffData
mtx *locks.PriorityMutex
}
func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
return &utxoDiffStore{
dag: dag,
dirty: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]*blockUTXODiffData),
dirty: make(map[*blockNode]struct{}),
loaded: make(map[*blockNode]*blockUTXODiffData),
mtx: locks.NewPriorityMutex(),
}
}
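Keying dirty and loaded by *blockNode instead of daghash.Hash makes lookups compare pointer identity, avoiding hash copies; this is safe as long as the block index hands out one canonical *blockNode per block. A minimal illustration with a hypothetical type:
type nodeExample struct{ blueScore uint64 }
m := make(map[*nodeExample]struct{})
n := &nodeExample{blueScore: 1}
m[n] = struct{}{}
_, found := m[n] // found == true: same pointer, same key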
@ -32,15 +32,15 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, err := diffStore.diffDataByHash(node.hash)
_, err := diffStore.diffDataByBlockNode(node)
if dbaccess.IsNotFoundError(err) {
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
diffStore.loaded[node] = &blockUTXODiffData{}
} else if err != nil {
return err
}
diffStore.loaded[*node.hash].diff = diff
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diff = diff
diffStore.setBlockAsDirty(node)
return nil
}
@ -48,19 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, err := diffStore.diffDataByHash(node.hash)
_, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return err
}
diffStore.loaded[*node.hash].diffChild = diffChild
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diffChild = diffChild
diffStore.setBlockAsDirty(node)
return nil
}
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, blockHashes []*daghash.Hash) error {
for _, hash := range blockHashes {
err := diffStore.removeBlockDiffData(dbContext, hash)
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error {
for _, node := range nodes {
err := diffStore.removeBlockDiffData(dbContext, node)
if err != nil {
return err
}
@ -68,37 +68,37 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context,
return nil
}
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, blockHash *daghash.Hash) error {
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error {
diffStore.mtx.LowPriorityWriteLock()
defer diffStore.mtx.LowPriorityWriteUnlock()
delete(diffStore.loaded, *blockHash)
err := dbaccess.RemoveDiffData(dbContext, blockHash)
delete(diffStore.loaded, node)
err := dbaccess.RemoveDiffData(dbContext, node.hash)
if err != nil {
return err
}
return nil
}
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
diffStore.dirty[*blockHash] = struct{}{}
func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) {
diffStore.dirty[node] = struct{}{}
}
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, error) {
if diffData, ok := diffStore.loaded[*hash]; ok {
func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) {
if diffData, ok := diffStore.loaded[node]; ok {
return diffData, nil
}
diffData, err := diffStore.diffDataFromDB(hash)
diffData, err := diffStore.diffDataFromDB(node.hash)
if err != nil {
return nil, err
}
diffStore.loaded[*hash] = diffData
diffStore.loaded[node] = diffData
return diffData, nil
}
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
@ -108,7 +108,7 @@ func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
@ -135,11 +135,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
// Allocate a buffer here to avoid needless allocations/grows
// while writing each entry.
buffer := &bytes.Buffer{}
for hash := range diffStore.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
for node := range diffStore.dirty {
buffer.Reset()
diffData := diffStore.loaded[hash]
err := storeDiffData(dbContext, buffer, &hash, diffData)
diffData := diffStore.loaded[node]
err := storeDiffData(dbContext, buffer, node.hash, diffData)
if err != nil {
return err
}
@ -148,7 +147,39 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
}
func (diffStore *utxoDiffStore) clearDirtyEntries() {
diffStore.dirty = make(map[daghash.Hash]struct{})
diffStore.dirty = make(map[*blockNode]struct{})
}
// maxBlueScoreDifferenceToKeepLoaded is the maximum difference
// between the virtual's blueScore and a blockNode's blueScore
// under which to keep diff data loaded in memory.
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100
// clearOldEntries removes entries whose blue score is lower than
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note
// that tips are not removed, even if their blue score is
// lower than the above threshold.
func (diffStore *utxoDiffStore) clearOldEntries() {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
virtualBlueScore := diffStore.dag.VirtualBlueScore()
minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore {
minBlueScore = 0
}
tips := diffStore.dag.virtual.tips()
toRemove := make(map[*blockNode]struct{})
for node := range diffStore.loaded {
if node.blueScore < minBlueScore && !tips.contains(node) {
toRemove[node] = struct{}{}
}
}
for node := range toRemove {
delete(diffStore.loaded, node)
}
}
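
In other words, a loaded entry is evicted only when it trails the virtual by more than maxBlueScoreDifferenceToKeepLoaded blue scores and is not a tip, with the subtraction guarded against uint64 underflow. A standalone sketch of that predicate (hypothetical helper, simplified parameters):

package main

import "fmt"

// shouldEvict reports whether a loaded entry may be dropped from memory.
func shouldEvict(nodeBlueScore, virtualBlueScore, maxDifference uint64, isTip bool) bool {
	minBlueScore := uint64(0)
	if virtualBlueScore > maxDifference { // guard against uint64 underflow
		minBlueScore = virtualBlueScore - maxDifference
	}
	return nodeBlueScore < minBlueScore && !isTip
}

func main() {
	fmt.Println(shouldEvict(5, 120, 100, false)) // true: trails the virtual by more than 100
	fmt.Println(shouldEvict(5, 120, 100, true))  // false: tips always stay loaded
	fmt.Println(shouldEvict(5, 50, 100, false))  // false: minBlueScore clamps to 0
}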
// storeDiffData stores the UTXO diff data to the database.
@ -156,7 +187,7 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() {
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the io.Writer
// instead of allocating one. We expect the buffer to
// already be initalized and, in most cases, to already
// already be initialized and, in most cases, to already
// be large enough to accommodate the serialized data
// without growing.
err := serializeBlockUTXODiffData(w, diffData)

View File

@ -1,12 +1,13 @@
package blockdag
import (
"reflect"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"reflect"
"testing"
)
func TestUTXODiffStore(t *testing.T) {
@ -78,7 +79,7 @@ func TestUTXODiffStore(t *testing.T) {
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
delete(dag.utxoDiffStore.loaded, *node.hash)
delete(dag.utxoDiffStore.loaded, node)
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
@ -87,9 +88,80 @@ func TestUTXODiffStore(t *testing.T) {
}
// Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded
if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("the diff data wasn't added to loaded map after requesting it")
} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
t.Errorf("Expected diff and loadedDiff to be equal")
}
}
func TestClearOldEntries(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = 10
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
// Add 10 blocks
blockNodes := make([]*blockNode, 10)
for i := 0; i < 10; i++ {
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
if !ok {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
blockNodes[i] = node
}
// Make sure that all of them exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if !ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
}
}
// Add 10 more blocks on top of the others
for i := 0; i < 10; i++ {
PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
}
// Make sure that all the old nodes no longer exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
// Add a block on top of the genesis to force the retrieval of all diffData
processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
if !ok {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
// Make sure that the child-of-genesis node is in the loaded set, since it
// is a tip.
_, ok = dag.utxoDiffStore.loaded[node]
if !ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
}
// Make sure that all the old nodes still do not exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
}

View File

@ -36,20 +36,6 @@ func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error
return nil
}
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the blue score shifted over one bit and the block reward flag
// in the lowest bit.
headerCode := uint64(entry.BlockBlueScore()) << 1
if entry.IsCoinbase() {
headerCode |= 0x01
}
return headerCode
}
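
For reference, the removed helper packed the blue score and the coinbase flag into a single integer: blue score shifted left by one bit, coinbase flag in bit 0. A minimal sketch of that encoding and its inverse:

package main

import "fmt"

func packHeaderCode(blueScore uint64, isCoinbase bool) uint64 {
	headerCode := blueScore << 1
	if isCoinbase {
		headerCode |= 0x01
	}
	return headerCode
}

func unpackHeaderCode(headerCode uint64) (blueScore uint64, isCoinbase bool) {
	return headerCode >> 1, headerCode&0x01 != 0
}

func main() {
	code := packHeaderCode(42, true)
	blueScore, isCoinbase := unpackHeaderCode(code)
	fmt.Println(code, blueScore, isCoinbase) // 85 42 true
}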
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
r := bytes.NewBuffer(serializedDiffData)
@ -66,7 +52,12 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
var ok bool
diffData.diffChild, ok = diffStore.dag.index.LookupNode(hash)
if !ok {
return nil, errors.Errorf("block %s does not exist in the DAG", hash)
}
}
diffData.diff, err = deserializeUTXODiff(r)
@ -177,10 +168,14 @@ var p2pkhUTXOEntrySerializeSize = 8 + 8 + wire.VarIntSerializeSize(25) + 25
// serializeUTXOEntry encodes the entry to the given io.Writer and uses compression if useCompression is true.
// The compression format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Encode the blueScore.
err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
if err != nil {
return err
}
err := binaryserializer.PutUint64(w, byteOrder, headerCode)
// Encode the packedFlags.
err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
if err != nil {
return err
}
@ -208,26 +203,21 @@ func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// the entry according to the format that is described in detail
// above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
// Deserialize the header code.
headerCode, err := binaryserializer.Uint64(r, byteOrder)
// Deserialize the blueScore.
blockBlueScore, err := binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := headerCode&0x01 != 0
blockBlueScore := headerCode >> 1
// Decode the packedFlags.
packedFlags, err := binaryserializer.Uint8(r)
if err != nil {
return nil, err
}
entry := &UTXOEntry{
blockBlueScore: blockBlueScore,
packedFlags: 0,
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
packedFlags: txoFlags(packedFlags),
}
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
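
The entry is now serialized as explicit fields instead of a packed header code: blueScore as a uint64, packedFlags as a single byte, then the amount. A hedged sketch of that layout using only the standard library, assuming little-endian byte order (binaryserializer is taken to wrap equivalent calls):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	buf := &bytes.Buffer{}

	// Write the fields in order: blueScore (uint64), packedFlags (uint8), amount (uint64).
	binary.Write(buf, binary.LittleEndian, uint64(7))    // blueScore
	buf.WriteByte(0x01)                                  // packedFlags with the coinbase bit set
	binary.Write(buf, binary.LittleEndian, uint64(5000)) // amount

	// Read them back in the same order.
	var blueScore, amount uint64
	var packedFlags uint8
	binary.Read(buf, binary.LittleEndian, &blueScore)
	binary.Read(buf, binary.LittleEndian, &packedFlags)
	binary.Read(buf, binary.LittleEndian, &amount)
	fmt.Println(blueScore, packedFlags, amount) // 7 1 5000
}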

View File

@ -293,8 +293,8 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
}
if d.toRemove.contains(outpoint) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)
}
// If not exists neither in toAdd nor in toRemove - add to toRemove
@ -305,9 +305,9 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
// If already exists in toRemove with the same blueScore - remove from toRemove
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
return ruleError(ErrWithDiff, fmt.Sprintf(
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint))
"corresponding entry in diff.toRemove", outpoint)
}
d.toRemove.remove(outpoint)
continue
@ -316,8 +316,8 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)
}
// If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd
@ -399,77 +399,11 @@ type UTXOSet interface {
fmt.Stringer
diffFrom(other UTXOSet) (*UTXODiff, error)
WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
}
// diffFromTx is a common implementation for diffFromTx, that works
// for both diff-based and full UTXO sets
// Returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
if entry, ok := u.Get(txIn.PreviousOutpoint); ok {
err := diff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return nil, err
}
} else {
return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
"Transaction %s is invalid because spends outpoint %s that is not in utxo set",
tx.TxID(), txIn.PreviousOutpoint))
}
}
}
for i, txOut := range tx.TxOut {
entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
err := diff.AddEntry(outpoint, entry)
if err != nil {
return nil, err
}
}
return diff, nil
}
// diffFromAcceptedTx is a common implementation for diffFromAcceptedTx, that works
// for both diff-based and full UTXO sets.
// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore.
// Returns an error if the provided transaction's entry is not valid in the context
// of this UTXOSet.
func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
// Fetch any unaccepted transaction
existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
existingEntry, ok := u.Get(existingOutpoint)
if !ok {
return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint)
}
// Remove unaccepted entries
err := diff.RemoveEntry(existingOutpoint, existingEntry)
if err != nil {
return nil, err
}
// Add new entries with their accepting blue score
newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
err = diff.AddEntry(existingOutpoint, newEntry)
if err != nil {
return nil, err
}
}
return diff, nil
}
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
@ -523,17 +457,15 @@ func (fus *FullUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool, err error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
if !fus.containsInputs(tx) {
return false, nil
}
for _, txIn := range tx.TxIn {
fus.remove(txIn.PreviousOutpoint)
}
if !fus.containsInputs(tx) {
return false, nil
}
for _, txIn := range tx.TxIn {
fus.remove(txIn.PreviousOutpoint)
}
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
@ -543,12 +475,6 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool
return true, nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(fus, tx, acceptingBlueScore)
}
func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
@ -560,10 +486,6 @@ func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
return true
}
func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(fus, tx, acceptingBlueScore)
}
// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
@ -619,12 +541,11 @@ func (dus *DiffUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase && !dus.containsInputs(tx) {
if !dus.containsInputs(tx) {
return false, nil
}
err := dus.appendTx(tx, blockBlueScore, isCoinbase)
err := dus.appendTx(tx, blockBlueScore)
if err != nil {
return false, err
}
@ -632,20 +553,19 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
return true, nil
}
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64) error {
for _, txIn := range tx.TxIn {
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
}
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)
@ -689,16 +609,6 @@ func (dus *DiffUTXOSet) meldToBase() error {
return nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}
@ -708,6 +618,12 @@ func (dus *DiffUTXOSet) clone() UTXOSet {
return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}
// cloneWithoutBase returns a *DiffUTXOSet with same
// base as this *DiffUTXOSet and a cloned diff.
func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet {
return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
}
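
Note the aliasing this implies: the clone and the original observe the same base, while their diffs evolve independently. A sketch with simplified, hypothetical types:

package main

import "fmt"

// fullSet and diffSet are simplified stand-ins for FullUTXOSet and DiffUTXOSet.
type fullSet struct{ entries map[string]uint64 }

type diffSet struct {
	base *fullSet
	diff map[string]uint64
}

func (d *diffSet) cloneWithoutBase() *diffSet {
	clonedDiff := make(map[string]uint64, len(d.diff))
	for outpoint, amount := range d.diff {
		clonedDiff[outpoint] = amount
	}
	return &diffSet{base: d.base, diff: clonedDiff} // the base is shared, the diff is not
}

func main() {
	original := &diffSet{
		base: &fullSet{entries: map[string]uint64{"outpoint0": 10}},
		diff: map[string]uint64{},
	}
	clone := original.cloneWithoutBase()

	original.base.entries["outpoint1"] = 20 // visible through the clone as well
	clone.diff["outpoint2"] = 30            // invisible to the original

	fmt.Println(len(clone.base.entries), len(original.diff)) // 2 0
}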
// Get returns the UTXOEntry associated with provided outpoint in this UTXOSet.
// Returns false in second output if this UTXOEntry was not found
func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {

View File

@ -1,7 +1,6 @@
package blockdag
import (
"math"
"reflect"
"testing"
@ -947,12 +946,9 @@ func TestUTXOSetDiffRules(t *testing.T) {
// TestDiffUTXOSet_addTx makes sure that diffUTXOSet addTx works as expected
func TestDiffUTXOSet_addTx(t *testing.T) {
// coinbaseTX is coinbase. As such, it has no inputs.
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
coinbaseTX := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
coinbaseTX := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
// transaction1 spends coinbaseTX
id1 := coinbaseTX.TxID()
@ -1110,81 +1106,6 @@ testLoop:
}
}
func TestDiffFromTx(t *testing.T) {
fus := &FullUTXOSet{
utxoCollection: utxoCollection{},
}
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
cbTx := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
if isAccepted, err := fus.AddTx(cbTx, 1); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", cbTx.TxID())
}
acceptingBlueScore := uint64(2)
cbOutpoint := wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}
txIns := []*wire.TxIn{{
PreviousOutpoint: cbOutpoint,
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
txOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
diff, err := fus.diffFromTx(tx, acceptingBlueScore)
if err != nil {
t.Errorf("diffFromTx: %v", err)
}
if !reflect.DeepEqual(diff.toAdd, utxoCollection{
wire.Outpoint{TxID: *tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2),
}) {
t.Errorf("diff.toAdd doesn't have the expected values")
}
if !reflect.DeepEqual(diff.toRemove, utxoCollection{
wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1),
}) {
t.Errorf("diff.toRemove doesn't have the expected values")
}
//Test that we get an error if we don't have the outpoint inside the utxo set
invalidTxIns := []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{TxID: daghash.TxID{}, Index: 0},
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
invalidTxOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts)
_, err = fus.diffFromTx(invalidTx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
//Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
}
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.TxID())
}
_, err = dus.diffFromTx(tx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
}
// collection returns a collection of all UTXOs in this set
func (fus *FullUTXOSet) collection() utxoCollection {
return fus.utxoCollection.clone()

View File

@ -400,10 +400,11 @@ func CalcTxMass(tx *util.Tx, previousScriptPubKeys [][]byte) uint64 {
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to checkProofOfWork.
func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags BehaviorFlags) (delay time.Duration, err error) {
func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFlags) (delay time.Duration, err error) {
// Ensure the proof of work bits in the block header is in min/max range
// and the block hash is less than the target value described by the
// bits.
header := &block.MsgBlock().Header
err = dag.checkProofOfWork(header, flags)
if err != nil {
return 0, err
@ -465,87 +466,182 @@ func checkBlockParentsOrder(header *wire.BlockHeader) error {
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to checkBlockHeaderSanity.
func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (time.Duration, error) {
msgBlock := block.MsgBlock()
header := &msgBlock.Header
delay, err := dag.checkBlockHeaderSanity(header, flags)
delay, err := dag.checkBlockHeaderSanity(block, flags)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsAtLeastOneTransaction(block)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsLessThanMaxBlockMassTransactions(block)
if err != nil {
return 0, err
}
err = dag.checkFirstBlockTransactionIsCoinbase(block)
if err != nil {
return 0, err
}
err = dag.checkBlockContainsOnlyOneCoinbase(block)
if err != nil {
return 0, err
}
err = dag.checkBlockTransactionOrder(block)
if err != nil {
return 0, err
}
err = dag.checkNoNonNativeTransactions(block)
if err != nil {
return 0, err
}
err = dag.checkBlockTransactionSanity(block)
if err != nil {
return 0, err
}
err = dag.checkBlockHashMerkleRoot(block)
if err != nil {
return 0, err
}
// A block must have at least one transaction.
numTx := len(msgBlock.Transactions)
if numTx == 0 {
return 0, ruleError(ErrNoTransactions, "block does not contain "+
"any transactions")
// The following check will be fairly quick since the transaction IDs
// are already cached due to building the merkle tree above.
err = dag.checkBlockDuplicateTransactions(block)
if err != nil {
return 0, err
}
err = dag.checkBlockDoubleSpends(block)
if err != nil {
return 0, err
}
return delay, nil
}
func (dag *BlockDAG) checkBlockContainsAtLeastOneTransaction(block *util.Block) error {
transactions := block.Transactions()
numTx := len(transactions)
if numTx == 0 {
return ruleError(ErrNoTransactions, "block does not contain "+
"any transactions")
}
return nil
}
func (dag *BlockDAG) checkBlockContainsLessThanMaxBlockMassTransactions(block *util.Block) error {
// A block must not have more transactions than the max block mass or
// else it is certainly over the block mass limit.
transactions := block.Transactions()
numTx := len(transactions)
if numTx > wire.MaxMassPerBlock {
str := fmt.Sprintf("block contains too many transactions - "+
"got %d, max %d", numTx, wire.MaxMassPerBlock)
return 0, ruleError(ErrBlockMassTooHigh, str)
return ruleError(ErrBlockMassTooHigh, str)
}
return nil
}
// The first transaction in a block must be a coinbase.
func (dag *BlockDAG) checkFirstBlockTransactionIsCoinbase(block *util.Block) error {
transactions := block.Transactions()
if !transactions[util.CoinbaseTransactionIndex].IsCoinBase() {
return 0, ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
"block is not a coinbase")
}
return nil
}
txOffset := util.CoinbaseTransactionIndex + 1
// A block must not have more than one coinbase. And transactions must be
// ordered by subnetwork
for i, tx := range transactions[txOffset:] {
func (dag *BlockDAG) checkBlockContainsOnlyOneCoinbase(block *util.Block) error {
transactions := block.Transactions()
for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
if tx.IsCoinBase() {
str := fmt.Sprintf("block contains second coinbase at "+
"index %d", i+2)
return 0, ruleError(ErrMultipleCoinbases, str)
}
if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) {
return 0, ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
return ruleError(ErrMultipleCoinbases, str)
}
}
return nil
}
// Do some preliminary checks on each transaction to ensure they are
// sane before continuing.
func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error {
transactions := block.Transactions()
for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] {
if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) {
return ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
}
}
return nil
}
func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error {
// Disallow non-native/coinbase subnetworks in networks that don't allow them
if !dag.dagParams.EnableNonNativeSubnetworks {
transactions := block.Transactions()
for _, tx := range transactions {
if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
return ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed")
}
}
}
return nil
}
func (dag *BlockDAG) checkBlockTransactionSanity(block *util.Block) error {
transactions := block.Transactions()
for _, tx := range transactions {
err := CheckTransactionSanity(tx, dag.subnetworkID)
if err != nil {
return 0, err
return err
}
}
return nil
}
func (dag *BlockDAG) checkBlockHashMerkleRoot(block *util.Block) error {
// Build merkle tree and ensure the calculated merkle root matches the
// entry in the block header. This also has the effect of caching all
// of the transaction hashes in the block to speed up future hash
// checks.
hashMerkleTree := BuildHashMerkleTreeStore(block.Transactions())
calculatedHashMerkleRoot := hashMerkleTree.Root()
if !header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
if !block.MsgBlock().Header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) {
str := fmt.Sprintf("block hash merkle root is invalid - block "+
"header indicates %s, but calculated value is %s",
header.HashMerkleRoot, calculatedHashMerkleRoot)
return 0, ruleError(ErrBadMerkleRoot, str)
block.MsgBlock().Header.HashMerkleRoot, calculatedHashMerkleRoot)
return ruleError(ErrBadMerkleRoot, str)
}
return nil
}
// Check for duplicate transactions. This check will be fairly quick
// since the transaction IDs are already cached due to building the
// merkle tree above.
func (dag *BlockDAG) checkBlockDuplicateTransactions(block *util.Block) error {
existingTxIDs := make(map[daghash.TxID]struct{})
transactions := block.Transactions()
for _, tx := range transactions {
id := tx.ID()
if _, exists := existingTxIDs[*id]; exists {
str := fmt.Sprintf("block contains duplicate "+
"transaction %s", id)
return 0, ruleError(ErrDuplicateTx, str)
return ruleError(ErrDuplicateTx, str)
}
existingTxIDs[*id] = struct{}{}
}
return nil
}
return delay, nil
func (dag *BlockDAG) checkBlockDoubleSpends(block *util.Block) error {
usedOutpoints := make(map[wire.Outpoint]*daghash.TxID)
transactions := block.Transactions()
for _, tx := range transactions {
for _, txIn := range tx.MsgTx().TxIn {
if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists {
str := fmt.Sprintf("transaction %s spends "+
"outpoint %s that was already spent by "+
"transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID)
return ruleError(ErrDoubleSpendInSameBlock, str)
}
usedOutpoints[txIn.PreviousOutpoint] = tx.ID()
}
}
return nil
}
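
The check is a single pass that records each spent outpoint in a map and fails on the first repeat. The same technique in isolation, with string outpoints standing in for wire.Outpoint:

package main

import "fmt"

// firstDoubleSpend returns the first outpoint that is spent twice, if any.
func firstDoubleSpend(spentOutpoints []string) (string, bool) {
	usedOutpoints := make(map[string]struct{})
	for _, outpoint := range spentOutpoints {
		if _, exists := usedOutpoints[outpoint]; exists {
			return outpoint, true
		}
		usedOutpoints[outpoint] = struct{}{}
	}
	return "", false
}

func main() {
	fmt.Println(firstDoubleSpend([]string{"a:0", "b:1", "a:0"})) // a:0 true
	fmt.Println(firstDoubleSpend([]string{"a:0", "b:1"}))        // false
}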
// checkBlockHeaderContext performs several validation checks on the block header
@ -602,7 +698,7 @@ func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents bloc
for parentA := range parents {
// isFinalized might be false-negative because node finality status is
// updated in a separate goroutine. This is why later the block is
// checked more thoroughly on the finality rules in dag.checkFinalityRules.
// checked more thoroughly on the finality rules in dag.checkFinalityViolation.
if parentA.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
"parent of block %s", parentA.hash, blockHeader.BlockHash()))
@ -613,7 +709,7 @@ func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents bloc
continue
}
isAncestorOf, err := dag.isAncestorOf(parentA, parentB)
isAncestorOf, err := dag.isInPast(parentA, parentB)
if err != nil {
return err
}
@ -838,6 +934,11 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
return nil, err
}
err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
if err != nil {
return nil, err
}
if err := validateBlockMass(pastUTXO, transactions); err != nil {
return nil, err
}
@ -913,7 +1014,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
// Now that the inexpensive checks are done and have passed, verify the
// transactions are actually allowed to spend the coins by running the
// expensive ECDSA signature check scripts. Doing this last helps
// expensive SCHNORR signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
if err != nil {

View File

@ -5,12 +5,13 @@
package blockdag
import (
"github.com/pkg/errors"
"math"
"path/filepath"
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@ -124,12 +125,6 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
"block 4: %v", err)
}
blockNode3 := dag.index.LookupNode(blocks[3].Hash())
blockNode4 := dag.index.LookupNode(blocks[4].Hash())
if blockNode3.children.contains(blockNode4) {
t.Errorf("Block 4 wasn't successfully detached as a child from block3")
}
// Block 3a should connect even though it does not build on dag tips.
err = dag.CheckConnectBlockTemplateNoLock(blocks[5])
if err != nil {
@ -570,9 +565,9 @@ func TestValidateParents(t *testing.T) {
}
defer teardownFunc()
a := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlock(t, dag, a)
c := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b)

View File

@ -30,16 +30,18 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@ -75,7 +77,7 @@ func parseConfig() (*configFlags, error) {
}
if cfg.RPCCert == "" && !cfg.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
return nil, errors.New("either --notls or --rpccert must be specified")
}
if cfg.RPCCert != "" && cfg.DisableTLS {
return nil, errors.New("--rpccert should be omitted if --notls is used")

View File

@ -2,6 +2,7 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/util"
"os"
"github.com/kaspanet/kaspad/version"
@ -39,15 +40,20 @@ func main() {
client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))
panic(errors.Wrap(err, "error connecting to the RPC server"))
}
defer client.Disconnect()
miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
if err != nil {
panic(errors.Wrap(err, "error decoding mining address"))
}
doneChan := make(chan struct{})
spawn(func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay)
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced, miningAddr)
if err != nil {
panic(errors.Errorf("Error in mine loop: %s", err))
panic(errors.Wrap(err, "error in mine loop"))
}
doneChan <- struct{}{}
})

View File

@ -25,7 +25,9 @@ var hashesTried uint64
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool,
miningAddr util.Address) error {
errChan := make(chan error)
templateStopChan := make(chan struct{})
@ -35,7 +37,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
wg := sync.WaitGroup{}
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *util.Block)
mineNextBlock(client, foundBlock, templateStopChan, errChan)
mineNextBlock(client, miningAddr, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
block := <-foundBlock
templateStopChan <- struct{}{}
wg.Add(1)
@ -80,13 +82,15 @@ func logHashRate() {
})
}
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool,
templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
spawn(func() {
templatesLoop(client, newTemplateChan, errChan, templateStopChan)
templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan)
})
spawn(func() {
solveLoop(newTemplateChan, foundBlock, errChan)
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan)
})
}
@ -132,7 +136,7 @@ func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error)
wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
acceptedIDMerkleRoot, utxoCommitment, bits, 0))
for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) {
for i, txResult := range template.Transactions {
reader := hex.NewDecoder(strings.NewReader(txResult.Data))
tx := &wire.MsgTx{}
if err := tx.KaspaDecode(reader, 0); err != nil {
@ -167,7 +171,9 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util
}
func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
func templatesLoop(client *minerClient, miningAddr util.Address,
newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
longPollID := ""
getBlockTemplateLongPoll := func() {
if longPollID != "" {
@ -175,7 +181,7 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT
} else {
log.Infof("Requesting template without longPollID from %s", client.Host())
}
template, err := getBlockTemplate(client, longPollID)
template, err := getBlockTemplate(client, miningAddr, longPollID)
if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) {
log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
return
@ -203,16 +209,27 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT
}
}
func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
return client.GetBlockTemplate(miningAddr.String(), longPollID)
}
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block,
mineWhenNotSynced bool, errChan chan error) {
var stopOldTemplateSolving chan struct{}
for template := range newTemplateChan {
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)
}
if !template.IsSynced {
if !mineWhenNotSynced {
errChan <- errors.Errorf("got template with isSynced=false")
return
}
log.Warnf("Got template with isSynced=false")
}
stopOldTemplateSolving = make(chan struct{})
block, err := parseBlock(template)
if err != nil {

View File

@ -117,7 +117,6 @@ type Flags struct {
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`
MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in KAS/kB to be considered a non-zero fee."`
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
BlockMaxMass uint64 `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"`
UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
@ -127,7 +126,6 @@ type Flags struct {
DropAcceptanceIndex bool `long:"dropacceptanceindex" description:"Deletes the hash-based acceptance index from the database on start up and then exits."`
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
Subnetwork string `long:"subnetwork" description:"If subnetwork ID is specified, than node will request and process only payloads from specified subnetwork. And if subnetwork ID is ommited, than payloads of all subnetworks are processed. Subnetworks with IDs 2 through 255 are reserved for future use and are currently not allowed."`
ResetDatabase bool `long:"reset-db" description:"Reset database before starting node. It's needed when switching between subnetworks."`
NetworkFlags
}
@ -608,36 +606,6 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}
// Check mining addresses are valid and saved parsed versions.
activeConfig.MiningAddrs = make([]util.Address, 0, len(activeConfig.Flags.MiningAddrs))
for _, strAddr := range activeConfig.Flags.MiningAddrs {
addr, err := util.DecodeAddress(strAddr, activeConfig.NetParams().Prefix)
if err != nil {
str := "%s: mining address '%s' failed to decode: %s"
err := errors.Errorf(str, funcName, strAddr, err)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
if !addr.IsForPrefix(activeConfig.NetParams().Prefix) {
str := "%s: mining address '%s' is on the wrong network"
err := errors.Errorf(str, funcName, strAddr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
activeConfig.MiningAddrs = append(activeConfig.MiningAddrs, addr)
}
if activeConfig.Flags.Subnetwork != "" {
activeConfig.SubnetworkID, err = subnetworkid.NewFromStr(activeConfig.Flags.Subnetwork)
if err != nil {
return nil, nil, err
}
} else {
activeConfig.SubnetworkID = nil
}
// Add default port to all listener addresses if needed and remove
// duplicate addresses.
activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners,

View File

@ -7,6 +7,9 @@ package connmgr
import (
nativeerrors "errors"
"fmt"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/wire"
"net"
"sync"
"sync/atomic"
@ -30,10 +33,6 @@ var (
// defaultRetryDuration is the default duration of time for retrying
// persistent connections.
defaultRetryDuration = time.Second * 5
// defaultTargetOutbound is the default number of outbound connections to
// maintain.
defaultTargetOutbound = uint32(8)
)
var (
@ -54,6 +53,9 @@ var (
// ErrPeerNotFound is an error that is thrown if the peer was not found.
ErrPeerNotFound = errors.New("peer not found")
// ErrAddressManagerNil is used to indicate that the address manager cannot be nil in the configuration.
ErrAddressManagerNil = errors.New("Config: Address manager cannot be nil")
)
// ConnState represents the state of the requested connection.
@ -77,7 +79,7 @@ type ConnReq struct {
// The following variables must only be used atomically.
id uint64
Addr net.Addr
Addr *net.TCPAddr
Permanent bool
conn net.Conn
@ -151,13 +153,15 @@ type Config struct {
// connection is established.
OnConnection func(*ConnReq, net.Conn)
// OnConnectionFailed is a callback that is fired when a new outbound
// connection has failed to be established.
OnConnectionFailed func(*ConnReq)
// OnDisconnection is a callback that is fired when an outbound
// connection is disconnected.
OnDisconnection func(*ConnReq)
// GetNewAddress is a way to get an address to make a network connection
// to. If nil, no new connections will be made automatically.
GetNewAddress func() (net.Addr, error)
AddrManager *addrmgr.AddrManager
// Dial connects to the address on the named network. It cannot be nil.
Dial func(net.Addr) (net.Conn, error)
@ -197,7 +201,9 @@ type ConnManager struct {
start int32
stop int32
newConnReqMtx sync.Mutex
addressMtx sync.Mutex
usedOutboundGroups map[string]int64
usedAddresses map[string]struct{}
cfg Config
wg sync.WaitGroup
@ -233,9 +239,12 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
log.Debugf("Retrying further connections to %s every %s", c, d)
}
spawnAfter(d, func() {
cm.Connect(c)
cm.connect(c)
})
} else if cm.cfg.GetNewAddress != nil {
} else {
if c.Addr != nil {
cm.releaseAddress(c.Addr)
}
cm.failedAttempts++
if cm.failedAttempts >= maxFailedAttempts {
if shouldWriteLog {
@ -250,6 +259,43 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
}
}
func (cm *ConnManager) releaseAddress(addr *net.TCPAddr) {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
groupKey := usedOutboundGroupsKey(addr)
cm.usedOutboundGroups[groupKey]--
if cm.usedOutboundGroups[groupKey] < 0 {
panic(fmt.Errorf("cm.usedOutboundGroups[%s] has a negative value of %d. This should never happen", groupKey, cm.usedOutboundGroups[groupKey]))
}
delete(cm.usedAddresses, usedAddressesKey(addr))
}
func (cm *ConnManager) markAddressAsUsed(addr *net.TCPAddr) {
cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]++
cm.usedAddresses[usedAddressesKey(addr)] = struct{}{}
}
func (cm *ConnManager) isOutboundGroupUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]
return ok
}
func (cm *ConnManager) isAddressUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedAddresses[usedAddressesKey(addr)]
return ok
}
func usedOutboundGroupsKey(addr *net.TCPAddr) string {
// A fake service flag is used since it doesn't affect the group key.
na := wire.NewNetAddress(addr, wire.SFNodeNetwork)
return addrmgr.GroupKey(na)
}
func usedAddressesKey(addr *net.TCPAddr) string {
return addr.String()
}
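
Together these helpers track in-flight connections at two granularities: exact addresses as a set, and outbound group keys as reference counts, since several addresses can share one group. A simplified sketch with plain string keys (unlike the code above, this sketch drops a group's entry when its count reaches zero):

package main

import "fmt"

type addressTracker struct {
	usedAddresses      map[string]struct{}
	usedOutboundGroups map[string]int64
}

func (t *addressTracker) markUsed(addr, group string) {
	t.usedAddresses[addr] = struct{}{}
	t.usedOutboundGroups[group]++
}

func (t *addressTracker) release(addr, group string) {
	delete(t.usedAddresses, addr)
	t.usedOutboundGroups[group]--
	if t.usedOutboundGroups[group] == 0 {
		delete(t.usedOutboundGroups, group) // key presence mirrors "group in use"
	}
}

func main() {
	t := &addressTracker{
		usedAddresses:      make(map[string]struct{}),
		usedOutboundGroups: make(map[string]int64),
	}
	t.markUsed("173.190.115.66:16511", "173.190.0.0/16")
	t.markUsed("173.190.115.67:16511", "173.190.0.0/16")
	t.release("173.190.115.66:16511", "173.190.0.0/16")

	_, addrUsed := t.usedAddresses["173.190.115.66:16511"]
	_, groupUsed := t.usedOutboundGroups["173.190.0.0/16"]
	fmt.Println(addrUsed, groupUsed) // false true: the group is still held by .67
}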
// throttledError defines an error type whose logs get throttled. This is to
// prevent flooding the logs with identical errors.
type throttledError error
@ -388,21 +434,16 @@ out:
continue
}
// Otherwise, we will attempt a reconnection if
// we do not have enough peers, or if this is a
// persistent peer. The connection request is
// re added to the pending map, so that
// subsequent processing of connections and
// failures do not ignore the request.
if uint32(len(conns)) < cm.cfg.TargetOutbound ||
connReq.Permanent {
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %s",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq, nil)
}
// Otherwise, we will attempt a reconnection.
// The connection request is re-added to the
// pending map, so that subsequent processing
// of connections and failures does not ignore
// the request.
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %s",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq, nil)
case handleFailed:
connReq := msg.c
@ -419,6 +460,10 @@ out:
connReq, msg.err)
}
cm.handleFailedConn(connReq, msg.err)
if cm.cfg.OnConnectionFailed != nil {
cm.cfg.OnConnectionFailed(connReq)
}
}
case <-cm.quit:
@ -440,14 +485,9 @@ func (cm *ConnManager) NotifyConnectionRequestComplete() {
// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
cm.newConnReqMtx.Lock()
defer cm.newConnReqMtx.Unlock()
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if cm.cfg.GetNewAddress == nil {
return
}
c := &ConnReq{}
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
@ -470,8 +510,7 @@ func (cm *ConnManager) NewConnReq() {
case <-cm.quit:
return
}
addr, err := cm.cfg.GetNewAddress()
err := cm.associateAddressToConnReq(c)
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
@ -480,17 +519,52 @@ func (cm *ConnManager) NewConnReq() {
return
}
c.Addr = addr
cm.connect(c)
}
cm.Connect(c)
func (cm *ConnManager) associateAddressToConnReq(c *ConnReq) error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
addr, err := cm.getNewAddress()
if err != nil {
return err
}
cm.markAddressAsUsed(addr)
c.Addr = addr
return nil
}
// Connect assigns an id and dials a connection to the address of the
// connection request.
func (cm *ConnManager) Connect(c *ConnReq) {
func (cm *ConnManager) Connect(c *ConnReq) error {
err := func() error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
if cm.isAddressUsed(c.Addr) {
return fmt.Errorf("address %s is already in use", c.Addr)
}
cm.markAddressAsUsed(c.Addr)
return nil
}()
if err != nil {
return err
}
cm.connect(c)
return nil
}
// connect assigns an id and dials a connection to the address of the
// connection request. This function assumes that the connection address
// has been checked and already marked as used.
func (cm *ConnManager) connect(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if atomic.LoadUint64(&c.id) == 0 {
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
@ -637,23 +711,69 @@ func (cm *ConnManager) Stop() {
log.Trace("Connection manager stopped")
}
func (cm *ConnManager) getNewAddress() (*net.TCPAddr, error) {
for tries := 0; tries < 100; tries++ {
addr := cm.cfg.AddrManager.GetAddress()
if addr == nil {
break
}
// Check if there's already a connection to the same address.
netAddr := addr.NetAddress().TCPAddress()
if cm.isAddressUsed(netAddr) {
continue
}
// Address will not be invalid, local or unroutable
// because addrmanager rejects those on addition.
// Just check that we don't already have an address
// in the same group so that we are not connecting
// to the same network segment at the expense of
// others.
//
// Networks that accept unroutable connections are exempt
// from this rule, since they're meant to run within a
// private subnet, like 10.0.0.0/16.
if !config.ActiveConfig().NetParams().AcceptUnroutable && cm.isOutboundGroupUsed(netAddr) {
continue
}
// only allow recent nodes (10mins) after we failed 30
// times
if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
continue
}
// allow nondefault ports after 50 failed tries.
if tries < 50 && fmt.Sprintf("%d", netAddr.Port) !=
config.ActiveConfig().NetParams().DefaultPort {
continue
}
return netAddr, nil
}
return nil, ErrNoAddress
}
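
The selection loop relaxes its filters as failures accumulate: before 30 tries it skips addresses attempted within the last 10 minutes, before 50 tries it skips non-default ports, and it gives up entirely after 100 tries. The escalation skeleton on its own, with candidate selection stubbed out (hypothetical pick helper):

package main

import (
	"errors"
	"fmt"
	"time"
)

type candidate struct {
	lastAttempt   time.Time
	isDefaultPort bool
}

// pick mimics the escalation in getNewAddress with a stubbed address source.
func pick(candidates []candidate) (int, error) {
	for tries := 0; tries < 100; tries++ {
		c := candidates[tries%len(candidates)] // stand-in for AddrManager.GetAddress()

		// Only allow recently attempted addresses after 30 failed tries.
		if tries < 30 && time.Since(c.lastAttempt) < 10*time.Minute {
			continue
		}
		// Only allow non-default ports after 50 failed tries.
		if tries < 50 && !c.isDefaultPort {
			continue
		}
		return tries, nil
	}
	return 0, errors.New("no suitable address found")
}

func main() {
	candidates := []candidate{{lastAttempt: time.Now(), isDefaultPort: false}}
	tries, err := pick(candidates)
	fmt.Println(tries, err) // 50 <nil>: accepted once both filters have relaxed
}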
// New returns a new connection manager.
// Use Start to start connecting to the network.
func New(cfg *Config) (*ConnManager, error) {
if cfg.Dial == nil {
return nil, ErrDialNil
return nil, errors.WithStack(ErrDialNil)
}
if cfg.AddrManager == nil {
return nil, errors.WithStack(ErrAddressManagerNil)
}
// Default to sane values
if cfg.RetryDuration <= 0 {
cfg.RetryDuration = defaultRetryDuration
}
if cfg.TargetOutbound == 0 {
cfg.TargetOutbound = defaultTargetOutbound
}
cm := ConnManager{
cfg: *cfg, // Copy so caller can't mutate
requests: make(chan interface{}),
quit: make(chan struct{}),
cfg: *cfg, // Copy so caller can't mutate
requests: make(chan interface{}),
quit: make(chan struct{}),
usedAddresses: make(map[string]struct{}),
usedOutboundGroups: make(map[string]int64),
}
return &cm, nil
}

View File

@ -5,12 +5,19 @@
package connmgr
import (
"github.com/pkg/errors"
"fmt"
"io"
"io/ioutil"
"net"
"sync/atomic"
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
)
func init() {
@ -70,13 +77,28 @@ func mockDialer(addr net.Addr) (net.Conn, error) {
// TestNewConfig tests that new ConnManager config is validated as expected.
func TestNewConfig(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
_, err := New(&Config{})
if err == nil {
t.Fatalf("New expected error: 'Dial can't be nil', got nil")
if !errors.Is(err, ErrDialNil) {
t.Fatalf("New expected error: %s, got %s", ErrDialNil, err)
}
_, err = New(&Config{
Dial: mockDialer,
})
if !errors.Is(err, ErrAddressManagerNil) {
t.Fatalf("New expected error: %s, got %s", ErrAddressManagerNil, err)
}
amgr, teardown := addressManagerForTest(t, "TestNewConfig", 10)
defer teardown()
_, err = New(&Config{
Dial: mockDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New unexpected error: %v", err)
}
@ -85,17 +107,19 @@ func TestNewConfig(t *testing.T) {
// TestStartStop tests that the connection manager starts and stops as
// expected.
func TestStartStop(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
disconnected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestStartStop", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 1,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
Dial: mockDialer,
AddrManager: amgr,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
@ -104,7 +128,7 @@ func TestStartStop(t *testing.T) {
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
gotConnReq := <-connected
@ -119,7 +143,10 @@ func TestStartStop(t *testing.T) {
},
Permanent: true,
}
cmgr.Connect(cr)
err = cmgr.Connect(cr)
if err != nil {
t.Fatalf("Connect error: %s", err)
}
if cr.ID() != 0 {
t.Fatalf("start/stop: got id: %v, want: 0", cr.ID())
}
@ -133,21 +160,85 @@ func TestStartStop(t *testing.T) {
}
}
func overrideActiveConfig() func() {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
return func() {
// Give some extra time to all open NewConnReq goroutines
// to finish before restoring the active config to prevent
// potential panics.
time.Sleep(10 * time.Millisecond)
config.SetActiveConfig(originalActiveCfg)
}
}
func addressManagerForTest(t *testing.T, testName string, numAddresses uint8) (*addrmgr.AddrManager, func()) {
amgr, teardown := createEmptyAddressManagerForTest(t, testName)
for i := uint8(0); i < numAddresses; i++ {
ip := fmt.Sprintf("173.%d.115.66:16511", i)
err := amgr.AddAddressByIP(ip, nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed to add IP %s: %s", ip, err)
}
}
return amgr, teardown
}
func createEmptyAddressManagerForTest(t *testing.T, testName string) (*addrmgr.AddrManager, func()) {
path, err := ioutil.TempDir("", fmt.Sprintf("%s-database", testName))
if err != nil {
t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+
"failed: %s", err)
}
err = dbaccess.Open(path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
return addrmgr.New(nil, nil), func() {
// Wait for the connection manager to finish, so it'll
// have access to the address manager as long as it's
// alive.
time.Sleep(10 * time.Millisecond)
err := dbaccess.Close()
if err != nil {
t.Fatalf("error closing the database: %s", err)
}
}
}
// TestConnectMode tests that the connection manager works in the connect mode.
//
// In connect mode, automatic connections are disabled, so we test that
// requests using Connect are handled and that no other connections are made.
func TestConnectMode(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestConnectMode", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 2,
TargetOutbound: 0,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
Addr: &net.TCPAddr{
@ -176,6 +267,7 @@ func TestConnectMode(t *testing.T) {
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestTargetOutbound tests the target number of outbound connections.
@ -183,23 +275,26 @@ func TestConnectMode(t *testing.T) {
// We wait until all connections are established, then test that they are
// the only connections made.
func TestTargetOutbound(t *testing.T) {
targetOutbound := uint32(10)
restoreConfig := overrideActiveConfig()
defer restoreConfig()
const numAddressesInAddressManager = 10
targetOutbound := uint32(numAddressesInAddressManager - 2)
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestTargetOutbound", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: targetOutbound,
Dial: mockDialer,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
for i := uint32(0); i < targetOutbound; i++ {
@ -213,6 +308,146 @@ func TestTargetOutbound(t *testing.T) {
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestDuplicateOutboundConnections tests that connection requests cannot use an already used address.
// It checks this by creating one connection request for each address in the address manager, so that
// the next connection request has to fail because no unused address is available.
func TestDuplicateOutboundConnections(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
const numAddressesInAddressManager = 10
targetOutbound := uint32(numAddressesInAddressManager - 1)
connected := make(chan struct{})
failedConnections := make(chan struct{})
amgr, teardown := addressManagerForTest(t, "TestDuplicateOutboundConnections", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: targetOutbound,
Dial: mockDialer,
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- struct{}{}
},
OnConnectionFailed: func(_ *ConnReq) {
failedConnections <- struct{}{}
},
})
if err != nil {
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
for i := uint32(0); i < targetOutbound; i++ {
<-connected
}
time.Sleep(time.Millisecond)
// Here we check that making a manual connection request beyond the target outbound connections
// doesn't fail in itself, so that when a later request does fail we know the failure is
// address-related.
cmgr.NewConnReq()
select {
case <-connected:
break
case <-time.After(time.Millisecond):
t.Fatalf("connection request unexpectedly didn't connect")
}
select {
case <-failedConnections:
t.Fatalf("a connection request unexpectedly failed")
case <-time.After(time.Millisecond):
break
}
// After we created numAddressesInAddressManager connection requests, this request should fail
// because there aren't any more available addresses.
cmgr.NewConnReq()
select {
case <-connected:
t.Fatalf("connection request unexpectedly succeeded")
case <-time.After(time.Millisecond):
t.Fatalf("connection request didn't fail as expected")
case <-failedConnections:
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestSameOutboundGroupConnections tests that connection requests cannot use an address whose CIDR
// group is already in use.
// It checks this by creating an address manager with only two addresses that belong to the same
// CIDR group, and verifies that the second connection request fails.
func TestSameOutboundGroupConnections(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
amgr, teardown := createEmptyAddressManagerForTest(t, "TestSameOutboundGroupConnections")
defer teardown()
err := amgr.AddAddressByIP("173.190.115.66:16511", nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
}
err = amgr.AddAddressByIP("173.190.115.67:16511", nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
}
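// Both addresses above share the same CIDR group (173.190.x.x), so only one
// of them may back an outbound connection at a time.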
connected := make(chan struct{})
failedConnections := make(chan struct{})
cmgr, err := New(&Config{
TargetOutbound: 0,
Dial: mockDialer,
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- struct{}{}
},
OnConnectionFailed: func(_ *ConnReq) {
failedConnections <- struct{}{}
},
})
if err != nil {
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
cmgr.NewConnReq()
select {
case <-connected:
break
case <-time.After(time.Millisecond):
t.Fatalf("connection request unexpectedly didn't connect")
}
select {
case <-failedConnections:
t.Fatalf("a connection request unexpectedly failed")
case <-time.After(time.Millisecond):
break
}
cmgr.NewConnReq()
select {
case <-connected:
t.Fatalf("connection request unexpectedly succeeded")
case <-time.After(time.Millisecond):
t.Fatalf("connection request didn't fail as expected")
case <-failedConnections:
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestRetryPermanent tests that permanent connection requests are retried.
@ -220,11 +455,18 @@ func TestTargetOutbound(t *testing.T) {
// We make a permanent connection request using Connect, disconnect it using
// Disconnect, and wait for it to be reconnected.
func TestRetryPermanent(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
disconnected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestRetryPermanent", 10)
defer teardown()
cmgr, err := New(&Config{
RetryDuration: time.Millisecond,
TargetOutbound: 1,
TargetOutbound: 0,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
@ -232,9 +474,10 @@ func TestRetryPermanent(t *testing.T) {
OnDisconnection: func(c *ConnReq) {
disconnected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
@ -289,6 +532,9 @@ func TestRetryPermanent(t *testing.T) {
cmgr.Remove(cr.ID())
gotConnReq = <-disconnected
// Wait for status to be updated
time.Sleep(10 * time.Millisecond)
wantID = cr.ID()
gotID = gotConnReq.ID()
if gotID != wantID {
@ -300,6 +546,7 @@ func TestRetryPermanent(t *testing.T) {
t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
}
cmgr.Stop()
cmgr.Wait()
}
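// retryPermanentSketch is a minimal sketch of the permanent-request lifecycle
// exercised by TestRetryPermanent above (the *ConnManager type name is
// assumed from the upstream connmgr API): a permanent request that gets
// disconnected is redialed after RetryDuration, until it is removed.
func retryPermanentSketch(cmgr *ConnManager) {
	cr := &ConnReq{
		Addr:      &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 18555},
		Permanent: true,
	}
	cmgr.Connect(cr)         // dial; the request stays alive across failures
	cmgr.Disconnect(cr.ID()) // drop the connection; a retry is scheduled
	cmgr.Remove(cr.ID())     // forget the request; no further retries
}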
// TestMaxRetryDuration tests the maximum retry duration.
@ -307,6 +554,9 @@ func TestRetryPermanent(t *testing.T) {
// We use a timed dialer that initially returns an error, but returns a mock
// conn once the retry duration has grown to maxRetryDuration.
func TestMaxRetryDuration(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
networkUp := make(chan struct{})
time.AfterFunc(5*time.Millisecond, func() {
close(networkUp)
@ -320,17 +570,21 @@ func TestMaxRetryDuration(t *testing.T) {
}
}
amgr, teardown := addressManagerForTest(t, "TestMaxRetryDuration", 10)
defer teardown()
connected := make(chan *ConnReq)
cmgr, err := New(&Config{
RetryDuration: time.Millisecond,
TargetOutbound: 1,
TargetOutbound: 0,
Dial: timedDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
@ -350,35 +604,40 @@ func TestMaxRetryDuration(t *testing.T) {
case <-time.Tick(100 * time.Millisecond):
t.Fatalf("max retry duration: connection timeout")
}
cmgr.Stop()
cmgr.Wait()
}
// TestNetworkFailure tests that the connection manager handles a network
// failure gracefully.
func TestNetworkFailure(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
var dials uint32
errDialer := func(net net.Addr) (net.Conn, error) {
atomic.AddUint32(&dials, 1)
return nil, errors.New("network down")
}
amgr, teardown := addressManagerForTest(t, "TestNetworkFailure", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 5,
RetryDuration: 5 * time.Millisecond,
Dial: errDialer,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
t.Fatalf("network failure: got unexpected connection - %v", c.Addr)
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
time.AfterFunc(10*time.Millisecond, cmgr.Stop)
time.Sleep(10 * time.Millisecond)
cmgr.Stop()
cmgr.Wait()
wantMaxDials := uint32(75)
if atomic.LoadUint32(&dials) > wantMaxDials {
@ -394,17 +653,25 @@ func TestNetworkFailure(t *testing.T) {
// err so that the handler assumes that the conn manager is stopped and ignores
// the failure.
func TestStopFailed(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
done := make(chan struct{}, 1)
waitDialer := func(addr net.Addr) (net.Conn, error) {
done <- struct{}{}
time.Sleep(time.Millisecond)
return nil, errors.New("network down")
}
amgr, teardown := addressManagerForTest(t, "TestStopFailed", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: waitDialer,
Dial: waitDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
go func() {
@ -428,6 +695,9 @@ func TestStopFailed(t *testing.T) {
// TestRemovePendingConnection tests that it's possible to cancel a pending
// connection, removing its internal state from the ConnMgr.
func TestRemovePendingConnection(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
// Create a ConnMgr instance with an instance of a dialer that'll never
// succeed.
wait := make(chan struct{})
@ -435,11 +705,16 @@ func TestRemovePendingConnection(t *testing.T) {
<-wait
return nil, errors.Errorf("error")
}
amgr, teardown := addressManagerForTest(t, "TestRemovePendingConnection", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: indefiniteDialer,
Dial: indefiniteDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
@ -474,12 +749,16 @@ func TestRemovePendingConnection(t *testing.T) {
close(wait)
cmgr.Stop()
cmgr.Wait()
}
// TestCancelIgnoreDelayedConnection tests that a canceled connection request will
// not execute the on connection callback, even if an outstanding retry
// succeeds.
func TestCancelIgnoreDelayedConnection(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
retryTimeout := 10 * time.Millisecond
// Setup a dialer that will continue to return an error until the
@ -497,18 +776,22 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) {
}
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestCancelIgnoreDelayedConnection", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: failingDialer,
RetryDuration: retryTimeout,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
defer cmgr.Stop()
// Establish a connection request to a random IP we've chosen.
cr := &ConnReq{
@ -552,7 +835,8 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) {
t.Fatalf("on-connect should not be called for canceled req")
case <-time.After(5 * retryTimeout):
}
cmgr.Stop()
cmgr.Wait()
}
// mockListener implements the net.Listener interface and is used to test
@ -617,21 +901,29 @@ func newMockListener(localAddr string) *mockListener {
// TestListeners ensures providing listeners to the connection manager along
// with an accept callback works properly.
func TestListeners(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
// Setup a connection manager with a couple of mock listeners that
// notify a channel when they receive mock connections.
receivedConns := make(chan net.Conn)
listener1 := newMockListener("127.0.0.1:16111")
listener2 := newMockListener("127.0.0.1:9333")
listeners := []net.Listener{listener1, listener2}
amgr, teardown := addressManagerForTest(t, "TestListeners", 10)
defer teardown()
cmgr, err := New(&Config{
Listeners: listeners,
OnAccept: func(conn net.Conn) {
receivedConns <- conn
},
Dial: mockDialer,
Dial: mockDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()

View File

@ -5,7 +5,6 @@
package dagconfig
import (
"math"
"time"
"github.com/kaspanet/kaspad/util/daghash"
@ -13,22 +12,10 @@ import (
"github.com/kaspanet/kaspad/wire"
)
var genesisTxIns = []*wire.TxIn{
{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{
0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
}
var genesisTxOuts = []*wire.TxOut{}
var genesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x17, // Varint
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, // OP-TRUE p2sh
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
@ -37,58 +24,46 @@ var genesisTxPayload = []byte{
// genesisCoinbaseTx is the coinbase transaction for the genesis block for
// the main network.
var genesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, genesisTxIns, genesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, genesisTxPayload)
var genesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, genesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, genesisTxPayload)
// genesisHash is the hash of the first block in the block DAG for the main
// network (genesis block).
var genesisHash = daghash.Hash{
0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50,
0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33,
0x38, 0x7e, 0xd1, 0xd5, 0x8c, 0x42, 0x59, 0x1a,
0x31, 0xac, 0x9a, 0xa6, 0x2e, 0xd5, 0x2b, 0x0f,
0xb3, 0x5d, 0x34, 0x3c, 0xf6, 0xbb, 0xd5, 0xaf,
0x40, 0x4b, 0xff, 0x3f, 0x83, 0x27, 0x71, 0x1e,
0xe1, 0x83, 0xf6, 0x41, 0x32, 0x8c, 0xba, 0xe6,
0xd3, 0xba, 0x13, 0xef, 0x7b, 0x7e, 0x61, 0x65,
}
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = daghash.Hash{
0x72, 0x10, 0x35, 0x85, 0xdd, 0xac, 0x82, 0x5c,
0x49, 0x13, 0x9f, 0xc0, 0x0e, 0x37, 0xc0, 0x45,
0x71, 0xdf, 0xd9, 0xf6, 0x36, 0xdf, 0x4c, 0x42,
0x72, 0x7b, 0x9e, 0x86, 0xdd, 0x37, 0xd2, 0xbd,
0xca, 0x85, 0x56, 0x27, 0xc7, 0x6a, 0xb5, 0x7a,
0x26, 0x1d, 0x63, 0x62, 0x1e, 0x57, 0x21, 0xf0,
0x5e, 0x60, 0x1f, 0xee, 0x1d, 0x4d, 0xaa, 0x53,
0x72, 0xe1, 0x16, 0xda, 0x4b, 0xb3, 0xd8, 0x0e,
}
// genesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for the main network.
var genesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &genesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5cdac4b0, 0),
Timestamp: time.Unix(0x5edf4ce0, 0),
Bits: 0x207fffff,
Nonce: 0x1,
Nonce: 0,
},
Transactions: []*wire.MsgTx{genesisCoinbaseTx},
}
var devnetGenesisTxIns = []*wire.TxIn{
{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{
0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
}
var devnetGenesisTxOuts = []*wire.TxOut{}
var devnetGenesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x17, // Varint
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, // OP-TRUE p2sh
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
@ -98,58 +73,46 @@ var devnetGenesisTxPayload = []byte{
// devnetGenesisCoinbaseTx is the coinbase transaction for the genesis block for
// the development network.
var devnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, devnetGenesisTxIns, devnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, devnetGenesisTxPayload)
var devnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, devnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, devnetGenesisTxPayload)
// devnetGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block).
var devnetGenesisHash = daghash.Hash{
0x17, 0x59, 0x5c, 0x09, 0xdd, 0x1a, 0x51, 0x65,
0x14, 0xbc, 0x19, 0xff, 0x29, 0xea, 0xf3, 0xcb,
0xe2, 0x76, 0xf0, 0xc7, 0x86, 0xf8, 0x0c, 0x53,
0x59, 0xbe, 0xee, 0x0c, 0x2b, 0x5d, 0x00, 0x00,
0x50, 0x92, 0xd1, 0x1f, 0xaa, 0xba, 0xd3, 0x58,
0xa8, 0x22, 0xd7, 0xec, 0x8e, 0xe3, 0xf4, 0x26,
0x17, 0x18, 0x74, 0xd7, 0x87, 0x05, 0x9d, 0xed,
0x33, 0xcd, 0xe1, 0x26, 0x1a, 0x69, 0x00, 0x00,
}
// devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for the development network.
var devnetGenesisMerkleRoot = daghash.Hash{
0x16, 0x0a, 0xc6, 0x8b, 0x77, 0x08, 0xf4, 0x96,
0xa3, 0x07, 0x05, 0xbc, 0x92, 0xda, 0xee, 0x73,
0x26, 0x5e, 0xd0, 0x85, 0x78, 0xa2, 0x5d, 0x02,
0x49, 0x8a, 0x2a, 0x22, 0xef, 0x41, 0xc9, 0xc3,
0x68, 0x60, 0xe7, 0x77, 0x47, 0x74, 0x7f, 0xd5,
0x55, 0x58, 0x8a, 0xb5, 0xc2, 0x29, 0x0c, 0xa6,
0x65, 0x44, 0xb4, 0x4f, 0xfa, 0x31, 0x7a, 0xfa,
0x55, 0xe0, 0xcf, 0xac, 0x9c, 0x86, 0x30, 0x2a,
}
// devnetGenesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for the development network.
var devnetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &devnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5e15e758, 0),
Timestamp: time.Unix(0x5edf4ce0, 0),
Bits: 0x1e7fffff,
Nonce: 0x282ac,
Nonce: 0xb3ed,
},
Transactions: []*wire.MsgTx{devnetGenesisCoinbaseTx},
}
var regtestGenesisTxIns = []*wire.TxIn{
{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{
0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
}
var regtestGenesisTxOuts = []*wire.TxOut{}
var regtestGenesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x17, // Varint
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, // OP-TRUE p2sh
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
@ -159,58 +122,46 @@ var regtestGenesisTxPayload = []byte{
// regtestGenesisCoinbaseTx is the coinbase transaction for
// the genesis block for the regtest network.
var regtestGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, regtestGenesisTxIns, regtestGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, regtestGenesisTxPayload)
var regtestGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, regtestGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, regtestGenesisTxPayload)
// regtestGenesisHash is the hash of the first block in the block DAG for the regression test
// network (genesis block).
var regtestGenesisHash = daghash.Hash{
0xfc, 0x02, 0x19, 0x6f, 0x79, 0x7a, 0xed, 0x2d,
0x0f, 0x31, 0xa5, 0xbd, 0x32, 0x13, 0x29, 0xc7,
0x7c, 0x0c, 0x5c, 0x1a, 0x5b, 0x7c, 0x20, 0x68,
0xb7, 0xc9, 0x9f, 0x61, 0x13, 0x11, 0x00, 0x00,
0xf8, 0x1d, 0xe9, 0x86, 0xa5, 0x60, 0xe0, 0x34,
0x0f, 0x02, 0xaa, 0x8d, 0xea, 0x6f, 0x1f, 0xc6,
0x2a, 0xb4, 0x77, 0xbd, 0xca, 0xed, 0xad, 0x3c,
0x99, 0xe6, 0x98, 0x7c, 0x7b, 0x5e, 0x00, 0x00,
}
// regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for the regtest network.
var regtestGenesisMerkleRoot = daghash.Hash{
0x3a, 0x9f, 0x62, 0xc9, 0x2b, 0x16, 0x17, 0xb3,
0x41, 0x6d, 0x9e, 0x2d, 0x87, 0x93, 0xfd, 0x72,
0x77, 0x4d, 0x1d, 0x6f, 0x6d, 0x38, 0x5b, 0xf1,
0x24, 0x1b, 0xdc, 0x96, 0xce, 0xbf, 0xa1, 0x09,
0x1e, 0x08, 0xae, 0x1f, 0x43, 0xf5, 0xfc, 0x24,
0xe6, 0xec, 0x54, 0x5b, 0xf7, 0x52, 0x99, 0xe4,
0xcc, 0x4c, 0xa0, 0x79, 0x41, 0xfc, 0xbe, 0x76,
0x72, 0x4c, 0x7e, 0xd8, 0xa3, 0x43, 0x65, 0x94,
}
// regtestGenesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for the regression test network.
var regtestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &regtestGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5e15e2d8, 0),
Timestamp: time.Unix(0x5edf4ce0, 0),
Bits: 0x1e7fffff,
Nonce: 0x15a6,
Nonce: 0x4a78,
},
Transactions: []*wire.MsgTx{regtestGenesisCoinbaseTx},
}
var simnetGenesisTxIns = []*wire.TxIn{
{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{
0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
}
var simnetGenesisTxOuts = []*wire.TxOut{}
var simnetGenesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x17, // Varint
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, // OP-TRUE p2sh
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
@ -219,96 +170,84 @@ var simnetGenesisTxPayload = []byte{
}
// simnetGenesisCoinbaseTx is the coinbase transaction for the simnet genesis block.
var simnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, simnetGenesisTxIns, simnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, simnetGenesisTxPayload)
var simnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, simnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, simnetGenesisTxPayload)
// simnetGenesisHash is the hash of the first block in the block DAG for
// the simnet (genesis block).
var simnetGenesisHash = daghash.Hash{
0xff, 0x69, 0xcc, 0x45, 0x45, 0x74, 0x5b, 0xf9,
0xd5, 0x4e, 0x43, 0x56, 0x4f, 0x1b, 0xdf, 0x31,
0x09, 0xb7, 0x76, 0xaa, 0x2a, 0x33, 0x35, 0xc9,
0xa1, 0x80, 0xe0, 0x92, 0xbb, 0xae, 0xcd, 0x49,
0x34, 0x43, 0xed, 0xdc, 0xab, 0x0c, 0x39, 0x53,
0xa2, 0xc5, 0x6d, 0x12, 0x4b, 0xc2, 0x41, 0x1c,
0x1a, 0x05, 0x24, 0xb4, 0xff, 0xeb, 0xe8, 0xbd,
0xee, 0x6e, 0x9a, 0x77, 0xc7, 0xbb, 0x70, 0x7d,
}
// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for the simulation test network.
var simnetGenesisMerkleRoot = daghash.Hash{
0xb0, 0x1c, 0x3b, 0x9e, 0x0d, 0x9a, 0xc0, 0x80,
0x0a, 0x08, 0x42, 0x50, 0x02, 0xa3, 0xea, 0xdb,
0xed, 0xc8, 0xd0, 0xad, 0x35, 0x03, 0xd8, 0x0e,
0x11, 0x3c, 0x7b, 0xb2, 0xb5, 0x20, 0xe5, 0x84,
0x47, 0x52, 0xc7, 0x23, 0x70, 0x4d, 0x89, 0x17,
0xbd, 0x44, 0x26, 0xfa, 0x82, 0x7e, 0x1b, 0xa9,
0xc6, 0x46, 0x1a, 0x37, 0x5a, 0x73, 0x88, 0x09,
0xe8, 0x17, 0xff, 0xb1, 0xdb, 0x1a, 0xb3, 0x3f,
}
// simnetGenesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for the simulation test network.
var simnetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &simnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.Hash{},
UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5e15d31c, 0),
Timestamp: time.Unix(0x5ede5261, 0),
Bits: 0x207fffff,
Nonce: 0x3,
Nonce: 0x2,
},
Transactions: []*wire.MsgTx{simnetGenesisCoinbaseTx},
}
var testnetGenesisTxIns = []*wire.TxIn{
{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{
0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48,
0x2f, 0x62, 0x74, 0x63, 0x64, 0x2f,
},
Sequence: math.MaxUint64,
},
}
var testnetGenesisTxOuts = []*wire.TxOut{}
var testnetGenesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x01, // Varint
0x00, // OP-FALSE
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74, // kaspa-testnet
}
// testnetGenesisCoinbaseTx is the coinbase transaction for the testnet genesis block.
var testnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, testnetGenesisTxIns, testnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, testnetGenesisTxPayload)
var testnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, testnetGenesisTxOuts, subnetworkid.SubnetworkIDCoinbase, 0, testnetGenesisTxPayload)
// testnetGenesisHash is the hash of the first block in the block DAG for the test
// network (genesis block).
var testnetGenesisHash = daghash.Hash{
0x22, 0x15, 0x34, 0xa9, 0xff, 0x10, 0xdd, 0x47,
0xcd, 0x21, 0x11, 0x25, 0xc5, 0x6d, 0x85, 0x9a,
0x97, 0xc8, 0x63, 0x63, 0x79, 0x40, 0x80, 0x04,
0x74, 0xe6, 0x29, 0x7b, 0xbc, 0x08, 0x00, 0x00,
0xFC, 0x21, 0x64, 0x1A, 0xB5, 0x59, 0x61, 0x8E,
0xF3, 0x9A, 0x95, 0xF1, 0xDA, 0x07, 0x79, 0xBD,
0x11, 0x2F, 0x90, 0xFC, 0x8B, 0x33, 0x14, 0x8A,
0x90, 0x6B, 0x76, 0x08, 0x4B, 0x52, 0x00, 0x00,
}
// testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for testnet.
var testnetGenesisMerkleRoot = daghash.Hash{
0x88, 0x05, 0xd0, 0xe7, 0x8f, 0x41, 0x77, 0x39,
0x2c, 0xb6, 0xbb, 0xb4, 0x19, 0xa8, 0x48, 0x4a,
0xdf, 0x77, 0xb0, 0x82, 0xd6, 0x70, 0xd8, 0x24,
0x6a, 0x36, 0x05, 0xaa, 0xbd, 0x7a, 0xd1, 0x62,
0xA0, 0xA1, 0x3D, 0xFD, 0x86, 0x41, 0x35, 0xC8,
0xBD, 0xBB, 0xE6, 0x37, 0x35, 0xBB, 0x4C, 0x51,
0x11, 0x7B, 0x26, 0x90, 0x15, 0x64, 0x0F, 0x42,
0x6D, 0x2B, 0x6F, 0x37, 0x4D, 0xC1, 0xA9, 0x72,
}
// testnetGenesisBlock defines the genesis block of the block DAG which serves as the
// public transaction ledger for testnet.
var testnetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
Version: 0x10000000,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: &testnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: time.Unix(0x5e15adfe, 0),
Timestamp: time.Unix(0x5efc2128, 0),
Bits: 0x1e7fffff,
Nonce: 0x20a1,
Nonce: 0x1124,
},
Transactions: []*wire.MsgTx{testnetGenesisCoinbaseTx},
}

View File

@ -148,187 +148,101 @@ func TestDevnetGenesisBlock(t *testing.T) {
// genesisBlockBytes are the wire encoded bytes for the genesis block of the
// main network as of protocol version 1.
var genesisBlockBytes = []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x72, 0x10, 0x35, 0x85, 0xdd, 0xac, 0x82, 0x5c, 0x49, 0x13, 0x9f,
0xc0, 0x0e, 0x37, 0xc0, 0x45, 0x71, 0xdf, 0xd9, 0xf6, 0x36, 0xdf, 0x4c, 0x42, 0x72, 0x7b, 0x9e,
0x86, 0xdd, 0x37, 0xd2, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10, 0x00, 0xca, 0x85, 0x56, 0x27, 0xc7, 0x6a, 0xb5, 0x7a, 0x26, 0x1d, 0x63,
0x62, 0x1e, 0x57, 0x21, 0xf0, 0x5e, 0x60, 0x1f, 0xee, 0x1d, 0x4d, 0xaa, 0x53, 0x72, 0xe1, 0x16,
0xda, 0x4b, 0xb3, 0xd8, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xc4, 0xda, 0x5c, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63,
0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2,
0xea, 0x82, 0x4e, 0xb8, 0x87, 0x42, 0xd0, 0x6d, 0x1f, 0x8d, 0xc3, 0xad, 0x9f, 0x43, 0x9e, 0xed,
0x6f, 0x43, 0x3c, 0x02, 0x71, 0x71, 0x69, 0xfb, 0xbc, 0x91, 0x44, 0xac, 0xf1, 0x93, 0xd3, 0x18,
0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71,
0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
0x00, 0x00, 0x00, 0x00, 0xc4, 0x41, 0xe6, 0x78, 0x1d, 0xf7, 0xb3, 0x39, 0x66, 0x4d, 0x1a, 0x03,
0x97, 0x63, 0xc7, 0x2c, 0xfc, 0x70, 0xd7, 0x75, 0xb6, 0xd9, 0xfc, 0x1a, 0x96, 0xf0, 0xac, 0x07,
0xef, 0xfa, 0x26, 0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa9, 0x14,
0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, 0x7e, 0xba,
0x30, 0xcd, 0x5a, 0x4b, 0x87,
}
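// genesisBytesMatchSketch is a minimal sketch of what the genesis tests in
// this file verify, assuming wire.MsgBlock keeps its btcd-style Serialize
// method: serializing genesisBlock must reproduce genesisBlockBytes exactly.
func genesisBytesMatchSketch() bool {
	var buf bytes.Buffer
	if err := genesisBlock.Serialize(&buf); err != nil {
		return false
	}
	return bytes.Equal(buf.Bytes(), genesisBlockBytes)
}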
// regtestGenesisBlockBytes are the wire encoded bytes for the genesis block of
// the regression test network as of protocol version 1.
var regtestGenesisBlockBytes = []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x9f, 0x62,
0xc9, 0x2b, 0x16, 0x17, 0xb3, 0x41, 0x6d, 0x9e,
0x2d, 0x87, 0x93, 0xfd, 0x72, 0x77, 0x4d, 0x1d,
0x6f, 0x6d, 0x38, 0x5b, 0xf1, 0x24, 0x1b, 0xdc,
0x96, 0xce, 0xbf, 0xa1, 0x09, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0xe2, 0x15,
0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0xa6, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f,
0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63,
0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed,
0x32, 0xec, 0xb4, 0xf8, 0x3c, 0x7a, 0x32, 0x0f,
0xd2, 0xe5, 0x24, 0x77, 0x89, 0x43, 0x3a, 0x78,
0x0a, 0xda, 0x68, 0x2d, 0xf6, 0xaa, 0xb1, 0x19,
0xdd, 0xd8, 0x97, 0x15, 0x4b, 0xcb, 0x42, 0x25,
0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5,
0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71,
0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65,
0x67, 0x74, 0x65, 0x73, 0x74,
0x00, 0x00, 0x00, 0x10, 0x00, 0x1e, 0x08, 0xae, 0x1f, 0x43, 0xf5, 0xfc, 0x24, 0xe6, 0xec, 0x54,
0x5b, 0xf7, 0x52, 0x99, 0xe4, 0xcc, 0x4c, 0xa0, 0x79, 0x41, 0xfc, 0xbe, 0x76, 0x72, 0x4c, 0x7e,
0xd8, 0xa3, 0x43, 0x65, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0x78, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xd4, 0xc4, 0x87, 0x77, 0xf2, 0xe7, 0x5d, 0xf7, 0xff, 0x2d, 0xbb, 0xb6,
0x2a, 0x73, 0x1f, 0x54, 0x36, 0x33, 0xa7, 0x99, 0xad, 0xb1, 0x09, 0x65, 0xc0, 0xf0, 0xf4, 0x53,
0xba, 0xfb, 0x88, 0xae, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa9, 0x14,
0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, 0x7e, 0xba,
0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65, 0x67, 0x74, 0x65,
0x73, 0x74,
}
// testnetGenesisBlockBytes are the wire encoded bytes for the genesis block of
// the test network as of protocol version 1.
var testnetGenesisBlockBytes = []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x88, 0x05, 0xd0,
0xe7, 0x8f, 0x41, 0x77, 0x39, 0x2c, 0xb6, 0xbb,
0xb4, 0x19, 0xa8, 0x48, 0x4a, 0xdf, 0x77, 0xb0,
0x82, 0xd6, 0x70, 0xd8, 0x24, 0x6a, 0x36, 0x05,
0xaa, 0xbd, 0x7a, 0xd1, 0x62, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xad, 0x15,
0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0xa1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f,
0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63,
0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc,
0x72, 0xe6, 0x7e, 0x37, 0xa1, 0x34, 0x89, 0x23,
0x24, 0xaf, 0xae, 0x99, 0x1f, 0x89, 0x09, 0x41,
0x1a, 0x4d, 0x58, 0xfe, 0x5a, 0x04, 0xb0, 0x3e,
0xeb, 0x1b, 0x5b, 0xb8, 0x65, 0xa8, 0x65, 0x0f,
0x01, 0x00, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d,
0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74,
0x00, 0x00, 0x00, 0x10, 0x00, 0xa0, 0xa1, 0x3d, 0xfd, 0x86, 0x41, 0x35, 0xc8, 0xbd, 0xbb, 0xe6,
0x37, 0x35, 0xbb, 0x4c, 0x51, 0x11, 0x7b, 0x26, 0x90, 0x15, 0x64, 0x0f, 0x42, 0x6d, 0x2b, 0x6f,
0x37, 0x4d, 0xc1, 0xa9, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x21, 0xfc, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0x24, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf5, 0x41, 0x4c, 0xf4, 0xa8, 0xa2, 0x8c, 0x47, 0x9d, 0xb5, 0x75, 0x5e,
0x0f, 0x38, 0xd3, 0x27, 0x82, 0xc6, 0xd1, 0x89, 0xc1, 0x60, 0x49, 0xd9, 0x99, 0xc6, 0x2e, 0xbf,
0x4b, 0x5a, 0x3a, 0xcf, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x6b,
0x61, 0x73, 0x70, 0x61, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74,
}
// simnetGenesisBlockBytes are the wire encoded bytes for the genesis block of
// the simulation test network as of protocol version 1.
var simnetGenesisBlockBytes = []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x1c, 0x3b,
0x9e, 0x0d, 0x9a, 0xc0, 0x80, 0x0a, 0x08, 0x42,
0x50, 0x02, 0xa3, 0xea, 0xdb, 0xed, 0xc8, 0xd0,
0xad, 0x35, 0x03, 0xd8, 0x0e, 0x11, 0x3c, 0x7b,
0xb2, 0xb5, 0x20, 0xe5, 0x84, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd3, 0x15,
0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f,
0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63,
0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89,
0x48, 0xd3, 0x23, 0x9c, 0xf9, 0x88, 0x2b, 0x63,
0xc7, 0x33, 0x0f, 0xa3, 0x64, 0xf2, 0xdb, 0x39,
0x73, 0x5f, 0x2b, 0xa8, 0xd5, 0x7b, 0x5c, 0x31,
0x68, 0xc9, 0x63, 0x37, 0x5c, 0xe7, 0x41, 0x24,
0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5,
0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71,
0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x73, 0x69,
0x6d, 0x6e, 0x65, 0x74,
0x00, 0x00, 0x00, 0x10, 0x00, 0x47, 0x52, 0xc7, 0x23, 0x70, 0x4d, 0x89, 0x17, 0xbd, 0x44, 0x26,
0xfa, 0x82, 0x7e, 0x1b, 0xa9, 0xc6, 0x46, 0x1a, 0x37, 0x5a, 0x73, 0x88, 0x09, 0xe8, 0x17, 0xff,
0xb1, 0xdb, 0x1a, 0xb3, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x52, 0xde, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x20, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xd9, 0x39, 0x5f, 0x40, 0x2a, 0x5e, 0x24, 0x09, 0x1b, 0x9a, 0x4b, 0xdf,
0x7f, 0x0c, 0x03, 0x7f, 0xf1, 0xd2, 0x48, 0x8c, 0x26, 0xb0, 0xa3, 0x74, 0x60, 0xd9, 0x48, 0x18,
0x2b, 0x33, 0x22, 0x64, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa9, 0x14,
0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, 0x7e, 0xba,
0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x73, 0x69, 0x6d, 0x6e, 0x65,
0x74,
}
// devnetGenesisBlockBytes are the wire encoded bytes for the genesis block of
// the development network as of protocol version 1.
var devnetGenesisBlockBytes = []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x16, 0x0a, 0xc6,
0x8b, 0x77, 0x08, 0xf4, 0x96, 0xa3, 0x07, 0x05,
0xbc, 0x92, 0xda, 0xee, 0x73, 0x26, 0x5e, 0xd0,
0x85, 0x78, 0xa2, 0x5d, 0x02, 0x49, 0x8a, 0x2a,
0x22, 0xef, 0x41, 0xc9, 0xc3, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xe7, 0x15,
0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0xac, 0x82, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f,
0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63,
0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0xc7, 0x0c, 0x02, 0x9e, 0xb2, 0x2e, 0xb3, 0xad,
0x24, 0x10, 0xfe, 0x2c, 0xdb, 0x8e, 0x1d, 0xde,
0x81, 0x5b, 0xbb, 0x42, 0xfe, 0xb4, 0x93, 0xd6,
0xe3, 0xbe, 0x86, 0x02, 0xe6, 0x3a, 0x65, 0x24,
0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5,
0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71,
0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x64, 0x65,
0x76, 0x6e, 0x65, 0x74,
0x00, 0x00, 0x00, 0x10, 0x00, 0x68, 0x60, 0xe7, 0x77, 0x47, 0x74, 0x7f, 0xd5, 0x55, 0x58, 0x8a,
0xb5, 0xc2, 0x29, 0x0c, 0xa6, 0x65, 0x44, 0xb4, 0x4f, 0xfa, 0x31, 0x7a, 0xfa, 0x55, 0xe0, 0xcf,
0xac, 0x9c, 0x86, 0x30, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x4c, 0xdf, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f,
0x1e, 0xed, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x1d, 0x1c, 0x05, 0x21, 0x10, 0x45, 0x61, 0xed, 0xc6, 0x0b, 0xdc, 0x85,
0xc0, 0x0a, 0x70, 0x2b, 0x15, 0xd5, 0x3c, 0x07, 0xb0, 0x54, 0x4f, 0x5b, 0x1a, 0x04, 0xcd, 0x49,
0xf1, 0x7b, 0xd6, 0x27, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa9, 0x14,
0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, 0x7e, 0xba,
0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x64, 0x65, 0x76, 0x6e, 0x65,
0x74,
}

View File

@ -48,7 +48,7 @@ var (
)
const (
ghostdagK = 10
ghostdagK = 15
difficultyAdjustmentWindowSize = 2640
timestampDeviationTolerance = 132
finalityDuration = 24 * time.Hour
@ -176,6 +176,9 @@ type Params struct {
// Address encoding magics
PrivateKeyID byte // First byte of a WIF private key
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks bool
}
// NormalizeRPCServerAddress returns addr with the current network default
@ -230,6 +233,9 @@ var MainnetParams = Params{
// Address encoding magics
PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed)
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
}
// RegressionNetParams defines the network parameters for the regression test
@ -280,6 +286,9 @@ var RegressionNetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
}
// TestnetParams defines the network parameters for the test Kaspa network.
@ -328,6 +337,9 @@ var TestnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@ -380,6 +392,9 @@ var SimnetParams = Params{
PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed)
// Human-readable part for Bech32 encoded addresses
Prefix: util.Bech32PrefixKaspaSim,
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
}
// DevnetParams defines the network parameters for the development Kaspa network.
@ -428,6 +443,9 @@ var DevnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// EnableNonNativeSubnetworks enables non-native/coinbase transactions
EnableNonNativeSubnetworks: false,
}
var (

View File

@ -1,51 +0,0 @@
package database
import "bytes"
var separator = []byte("/")
// Bucket is a helper type meant to combine buckets,
// sub-buckets, and keys into a single full key-value
// database key.
type Bucket struct {
path [][]byte
}
// MakeBucket creates a new Bucket using the given path
// of buckets.
func MakeBucket(path ...[]byte) *Bucket {
return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
newPath := make([][]byte, len(b.path)+1)
copy(newPath, b.path)
copy(newPath[len(b.path):], [][]byte{bucketBytes})
return MakeBucket(newPath...)
}
// Key returns the key inside of the current bucket.
func (b *Bucket) Key(key []byte) []byte {
bucketPath := b.Path()
fullKeyLength := len(bucketPath) + len(key)
fullKey := make([]byte, fullKeyLength)
copy(fullKey, bucketPath)
copy(fullKey[len(bucketPath):], key)
return fullKey
}
// Path returns the full path of the current bucket.
func (b *Bucket) Path() []byte {
bucketPath := bytes.Join(b.path, separator)
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(separator))
copy(bucketPathWithFinalSeparator, bucketPath)
copy(bucketPathWithFinalSeparator[len(bucketPath):], separator)
return bucketPathWithFinalSeparator
}
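// For reference, the database package that replaces this file models keys
// with dedicated Bucket and Key types rather than raw byte slices; a minimal
// sketch of the replacement usage, as exercised by the tests below:
//
//	key := database.MakeBucket([]byte("bucket")).Key([]byte("key"))
//	err := db.Put(key, []byte("value"))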

database/common_test.go Normal file
View File

@ -0,0 +1,84 @@
package database_test
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/database/ffldb"
"io/ioutil"
"testing"
)
type databasePrepareFunc func(t *testing.T, testName string) (db database.Database, name string, teardownFunc func())
// databasePrepareFuncs is a set of functions, in which each function
// prepares a separate database type for testing.
// See testForAllDatabaseTypes for further details.
var databasePrepareFuncs = []databasePrepareFunc{
prepareFFLDBForTest,
}
func prepareFFLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("%s: TempDir unexpectedly "+
"failed: %s", testName, err)
}
db, err = ffldb.Open(path)
if err != nil {
t.Fatalf("%s: Open unexpectedly "+
"failed: %s", testName, err)
}
teardownFunc = func() {
err = db.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
}
return db, "ffldb", teardownFunc
}
// testForAllDatabaseTypes runs the given testFunc for every database
// type defined in databasePrepareFuncs. This is to make sure that
// all supported database types adhere to the assumptions defined in
// the interfaces in this package.
func testForAllDatabaseTypes(t *testing.T, testName string,
testFunc func(t *testing.T, db database.Database, testName string)) {
for _, prepareDatabase := range databasePrepareFuncs {
func() {
db, dbType, teardownFunc := prepareDatabase(t, testName)
defer teardownFunc()
testName := fmt.Sprintf("%s: %s", dbType, testName)
testFunc(t, db, testName)
}()
}
}
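// For illustration, a test in this package uses the helper above as follows
// (TestFoo and testFoo are hypothetical names; real call sites appear in
// cursor_test.go and database_test.go):
//
//	func TestFoo(t *testing.T) {
//		testForAllDatabaseTypes(t, "TestFoo", testFoo)
//	}
//
//	func testFoo(t *testing.T, db database.Database, testName string) {
//		// exercise db here; testName is prefixed with the database type
//	}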
type keyValuePair struct {
key *database.Key
value []byte
}
func populateDatabaseForTest(t *testing.T, db database.Database, testName string) []keyValuePair {
// Prepare a list of key/value pairs
entries := make([]keyValuePair, 10)
for i := 0; i < 10; i++ {
key := database.MakeBucket().Key([]byte(fmt.Sprintf("key%d", i)))
value := []byte("value")
entries[i] = keyValuePair{key: key, value: value}
}
// Put the pairs into the database
for _, entry := range entries {
err := db.Put(entry.key, entry.value)
if err != nil {
t.Fatalf("%s: Put unexpectedly "+
"failed: %s", testName, err)
}
}
return entries
}

View File

@ -3,23 +3,22 @@ package database
// Cursor iterates over database entries given some bucket.
type Cursor interface {
// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted. Returns false if the cursor is closed.
// iterator is exhausted. Panics if the cursor is closed.
Next() bool
// First moves the iterator to the first key/value pair. It returns false if
// such a pair does not exist or if the cursor is closed.
// such a pair does not exist. Panics if the cursor is closed.
First() bool
// Seek moves the iterator to the first key/value pair whose key is greater
// than or equal to the given key. It returns ErrNotFound if such a pair does
// not exist.
Seek(key []byte) error
Seek(key *Key) error
// Key returns the key of the current key/value pair, or ErrNotFound if done.
// Note that the key is trimmed to not include the prefix the cursor was opened
// with. The caller should not modify the contents of the returned slice, and
// The caller should not modify the contents of the returned key, and
// its contents may change on the next call to Next.
Key() ([]byte, error)
Key() (*Key, error)
// Value returns the value of the current key/value pair, or ErrNotFound if done.
// The caller should not modify the contents of the returned slice, and its
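// A minimal iteration sketch against the updated API, assuming a value db
// that implements DataAccessor and that the caller closes the cursor (note
// that Seek and Key now work with *Key rather than raw []byte slices):
//
//	cursor, err := db.Cursor(MakeBucket([]byte("bucket")))
//	if err != nil {
//		return err
//	}
//	defer cursor.Close()
//	for cursor.Next() {
//		key, err := cursor.Key()
//		if err != nil {
//			return err
//		}
//		value, err := cursor.Value()
//		if err != nil {
//			return err
//		}
//		// use key and value here
//	}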

database/cursor_test.go Normal file
View File

@ -0,0 +1,345 @@
// All tests within this file should call testForAllDatabaseTypes
// over the actual test. This is to make sure that all supported
// database types adhere to the assumptions defined in the
// interfaces in this package.
package database_test
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/database"
"reflect"
"strings"
"testing"
)
func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor {
cursor, err := db.Cursor(database.MakeBucket())
if err != nil {
t.Fatalf("%s: Cursor unexpectedly "+
"failed: %s", testName, err)
}
return cursor
}
func recoverFromClosedCursorPanic(t *testing.T, testName string) {
panicErr := recover()
if panicErr == nil {
t.Fatalf("%s: cursor unexpectedly "+
"didn't panic after being closed", testName)
}
expectedPanicErr := "closed cursor"
if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) {
t.Fatalf("%s: cursor panicked "+
"with wrong message. Want: %v, got: %s",
testName, expectedPanicErr, panicErr)
}
}
func TestCursorNext(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorNext", testCursorNext)
}
func testCursorNext(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Make sure that all the entries exist in the cursor, in their
// correct order
for _, entry := range entries {
hasNext := cursor.Next()
if !hasNext {
t.Fatalf("%s: cursor unexpectedly "+
"done", testName)
}
cursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(cursorKey, entry.key) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, entry.key, cursorKey)
}
cursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(cursorValue, entry.value) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, entry.value, cursorValue)
}
}
// The cursor should now be exhausted. Make sure Next now
// returns false
hasNext := cursor.Next()
if hasNext {
t.Fatalf("%s: cursor unexpectedly "+
"not done", testName)
}
// Rewind the cursor and close it
cursor.First()
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
// Call Next on the cursor. This time it should panic
// because it's closed.
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.Next()
}()
}
func TestCursorFirst(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorFirst", testCursorFirst)
}
func testCursorFirst(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Make sure that First returns true when the cursor is not empty
exists := cursor.First()
if !exists {
t.Fatalf("%s: Cursor unexpectedly "+
"returned false", testName)
}
// Make sure that the first key and value are as expected
firstEntryKey := entries[0].key
firstCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(firstCursorKey, firstEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, firstEntryKey, firstCursorKey)
}
firstEntryValue := entries[0].value
firstCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(firstCursorValue, firstEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, firstEntryValue, firstCursorValue)
}
// Exhaust the cursor
for cursor.Next() {
// Do nothing
}
// Call first again and make sure it still returns true
exists = cursor.First()
if !exists {
t.Fatalf("%s: First unexpectedly "+
"returned false", testName)
}
// Call next and make sure it returns true as well
exists = cursor.Next()
if !exists {
t.Fatalf("%s: Next unexpectedly "+
"returned false", testName)
}
// Remove all the entries from the database
for _, entry := range entries {
err := db.Delete(entry.key)
if err != nil {
t.Fatalf("%s: Delete unexpectedly "+
"failed: %s", testName, err)
}
}
// Create a new cursor over an empty dataset
cursor = prepareCursorForTest(t, db, testName)
// Make sure that First returns false when the cursor is empty
exists = cursor.First()
if exists {
t.Fatalf("%s: Cursor unexpectedly "+
"returned true", testName)
}
}
func TestCursorSeek(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorSeek", testCursorSeek)
}
func testCursorSeek(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Seek to the fourth entry and make sure it exists
fourthEntry := entries[3]
err := cursor.Seek(fourthEntry.key)
if err != nil {
t.Fatalf("%s: Cursor unexpectedly "+
"failed: %s", testName, err)
}
// Make sure that the key and value are as expected
fourthEntryKey := entries[3].key
fourthCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(fourthCursorKey, fourthEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, fourthEntryKey, fourthCursorKey)
}
fourthEntryValue := entries[3].value
fourthCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(fourthCursorValue, fourthEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, fourthEntryValue, fourthCursorValue)
}
// Call Next and make sure that we are now on the fifth entry
exists := cursor.Next()
if !exists {
t.Fatalf("%s: Next unexpectedly "+
"returned false", testName)
}
fifthEntryKey := entries[4].key
fifthCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(fifthCursorKey, fifthEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, fifthEntryKey, fifthCursorKey)
}
fifthEntryValue := entries[4].value
fifthCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(fifthCursorValue, fifthEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, fifthEntryValue, fifthCursorValue)
}
// Seek to a value that doesn't exist and make sure that
// the returned error is ErrNotFound
err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Seek unexpectedly "+
"succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Seek returned "+
"wrong error: %s", testName, err)
}
}
func TestCursorCloseErrors(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorCloseErrors", testCursorCloseErrors)
}
func testCursorCloseErrors(t *testing.T, db database.Database, testName string) {
populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close "+
"unexpectedly failed: %s", testName, err)
}
tests := []struct {
name string
function func() error
}{
{
name: "Seek",
function: func() error {
return cursor.Seek(database.MakeBucket().Key([]byte{}))
},
},
{
name: "Key",
function: func() error {
_, err := cursor.Key()
return err
},
},
{
name: "Value",
function: func() error {
_, err := cursor.Value()
return err
},
},
{
name: "Close",
function: func() error {
return cursor.Close()
},
},
}
for _, test := range tests {
expectedErrContainsString := "closed cursor"
// Make sure that the test function returns a "closed cursor" error
err = test.function()
if err == nil {
t.Fatalf("%s: %s "+
"unexpectedly succeeded", testName, test.name)
}
if !strings.Contains(err.Error(), expectedErrContainsString) {
t.Fatalf("%s: %s "+
"returned wrong error. Want: %s, got: %s",
testName, test.name, expectedErrContainsString, err)
}
}
}
func TestCursorCloseFirstAndNext(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorCloseFirstAndNext", testCursorCloseFirstAndNext)
}
func testCursorCloseFirstAndNext(t *testing.T, db database.Database, testName string) {
populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close "+
"unexpectedly failed: %s", testName, err)
}
// We expect First to panic
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.First()
}()
// We expect Next to panic
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.Next()
}()
}

View File

@ -5,19 +5,19 @@ package database
type DataAccessor interface {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
Put(key []byte, value []byte) error
Put(key *Key, value []byte) error
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
Get(key []byte) ([]byte, error)
Get(key *Key) ([]byte, error)
// Has returns true if the database contains the
// given key.
Has(key []byte) (bool, error)
Has(key *Key) (bool, error)
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
Delete(key []byte) error
Delete(key *Key) error
// AppendToStore appends the given data to the store
// defined by storeName. This function returns a serialized
@ -32,5 +32,5 @@ type DataAccessor interface {
RetrieveFromStore(storeName string, location []byte) ([]byte, error)
// Cursor begins a new cursor over the given bucket.
Cursor(bucket []byte) (Cursor, error)
Cursor(bucket *Bucket) (Cursor, error)
}
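
A minimal usage sketch for the reworked, key-typed accessor (hedged: `accessor` stands for any DataAccessor implementation; the pattern mirrors the tests in database_test.go below):

	func putAndGetSketch(accessor database.DataAccessor) ([]byte, error) {
		// Keys are always built through a bucket, even the top-level one.
		key := database.MakeBucket([]byte("bucket")).Key([]byte("key"))
		if err := accessor.Put(key, []byte("value")); err != nil {
			return nil, err
		}
		// Get returns ErrNotFound when the key does not exist.
		return accessor.Get(key)
	}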

database/database_test.go Normal file
View File

@@ -0,0 +1,207 @@
// All tests within this file should call testForAllDatabaseTypes
// over the actual test. This is to make sure that all supported
// database types adhere to the assumptions defined in the
// interfaces in this package.
package database_test
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"testing"
)
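
The testForAllDatabaseTypes helper referenced in the comment above is not part of this diff; a hypothetical sketch of its shape (openDatabaseForTest and the exact set of database types are assumptions, not from this commit):

	// Hypothetical sketch only. It assumes ffldb is currently the sole
	// database type, so it opens one instance and runs the test against it.
	func testForAllDatabaseTypes(t *testing.T, testName string,
		testFunc func(t *testing.T, db database.Database, testName string)) {

		db, teardownFunc := openDatabaseForTest(t, testName)
		defer teardownFunc()
		testFunc(t, db, testName)
	}
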
func TestDatabasePut(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabasePut", testDatabasePut)
}
func testDatabasePut(t *testing.T, db database.Database, testName string) {
// Put value1 into the database
key := database.MakeBucket().Key([]byte("key"))
value1 := []byte("value1")
err := db.Put(key, value1)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value is value1
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value1) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value1), string(returnedValue))
}
// Put value2 into the database with the same key
value2 := []byte("value2")
err = db.Put(key, value2)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value is value2
returnedValue, err = db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value2) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value2), string(returnedValue))
}
}
func TestDatabaseGet(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseGet", testDatabaseGet)
}
func testDatabaseGet(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Get the value back and make sure it's the same one
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value), string(returnedValue))
}
// Try getting a non-existent value and make sure
// the returned error is ErrNotFound
_, err = db.Get(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error: %s", testName, err)
}
}
func TestDatabaseHas(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseHas", testDatabaseHas)
}
func testDatabaseHas(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that Has returns true for the value we just put
exists, err := db.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if !exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value does not exist", testName)
}
// Make sure that Has returns false for a non-existent value
exists, err = db.Has(database.MakeBucket().Key([]byte("doesn't exist")))
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
}
func TestDatabaseDelete(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseDelete", testDatabaseDelete)
}
func testDatabaseDelete(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Delete the value
err = db.Delete(key)
if err != nil {
t.Fatalf("%s: Delete "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that Has returns false for the deleted value
exists, err := db.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
}
func TestDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseAppendToStoreAndRetrieveFromStore", testDatabaseAppendToStoreAndRetrieveFromStore)
}
func testDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) {
// Append some data into the store
storeName := "store"
data := []byte("data")
location, err := db.AppendToStore(storeName, data)
if err != nil {
t.Fatalf("%s: AppendToStore "+
"unexpectedly failed: %s", testName, err)
}
// Retrieve the data and make sure it's equal to what was appended
retrievedData, err := db.RetrieveFromStore(storeName, location)
if err != nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(retrievedData, data) {
t.Fatalf("%s: RetrieveFromStore "+
"returned unexpected data. Want: %s, got: %s",
testName, string(data), string(retrievedData))
}
// Make sure that an invalid location returns ErrNotFound
fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
_, err = db.RetrieveFromStore(storeName, fakeLocation)
if err == nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: RetrieveFromStore "+
"returned wrong error: %s", testName, err)
}
}

View File

@@ -16,12 +16,14 @@ const (
// cache. Note that this does not include the current/write file, so there
// will typically be one more than this value open.
maxOpenFiles = 25
)
var (
// maxFileSize is the maximum size for each file used to store data.
//
// NOTE: The current code uses uint32 for all offsets, so this value
// must be less than 2^32 (4 GiB). This is also why it's a typed
// constant.
// must be less than 2^32 (4 GiB).
// NOTE: This is a var rather than a const for testing purposes.
maxFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
)
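
Because maxFileSize is a var, tests can shrink it to force multi-file behavior cheaply, as the multi-file rollback test later in this diff does:

	// Inside a test: shrink the file size and restore it afterwards.
	currentMaxFileSize := maxFileSize
	maxFileSize = 16 // tiny files: a new one every couple of writes
	defer func() {
		maxFileSize = currentMaxFileSize
	}()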

View File

@@ -1,24 +1,40 @@
package ff
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"io/ioutil"
"os"
"reflect"
"testing"
)
func TestFlatFileStoreSanity(t *testing.T) {
// Open a test store
path, err := ioutil.TempDir("", "TestFlatFileStoreSanity")
func prepareStoreForTest(t *testing.T, testName string) (store *flatFileStore, teardownFunc func()) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: TempDir unexpectedly "+
"failed: %s", err)
t.Fatalf("%s: TempDir unexpectedly "+
"failed: %s", testName, err)
}
name := "test"
store, err := openFlatFileStore(path, name)
store, err = openFlatFileStore(path, name)
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: openFlatFileStore "+
"unexpectedly failed: %s", err)
t.Fatalf("%s: openFlatFileStore "+
"unexpectedly failed: %s", testName, err)
}
teardownFunc = func() {
err = store.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
}
return store, teardownFunc
}
func TestFlatFileStoreSanity(t *testing.T) {
store, teardownFunc := prepareStoreForTest(t, "TestFlatFileStoreSanity")
defer teardownFunc()
// Write something to the store
writeData := []byte("Hello world!")
@@ -72,3 +88,88 @@ func TestFlatFilePath(t *testing.T) {
}
}
}
func TestFlatFileMultiFileRollback(t *testing.T) {
store, teardownFunc := prepareStoreForTest(t, "TestFlatFileMultiFileRollback")
defer teardownFunc()
// Set the maxFileSize to 16 bytes so that we don't have to write
// an enormous amount of data to disk to get multiple files, all
// for the sake of this test.
currentMaxFileSize := maxFileSize
maxFileSize = 16
defer func() {
maxFileSize = currentMaxFileSize
}()
// Write five 8-byte chunks and keep the last location written to
var lastWriteLocation1 *flatFileLocation
for i := byte(0); i < 5; i++ {
writeData := []byte{i, i, i, i, i, i, i, i}
var err error
lastWriteLocation1, err = store.write(writeData)
if err != nil {
t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
"unexpected error: %s", err)
}
}
// Grab the current location and the current file number
currentLocation := store.currentLocation()
fileNumberBeforeWriting := store.writeCursor.currentFileNumber
// Write (2 * maxFileSize) more 8-byte chunks and keep the last location written to
var lastWriteLocation2 *flatFileLocation
for i := byte(0); i < byte(2*maxFileSize); i++ {
writeData := []byte{0, 1, 2, 3, 4, 5, 6, 7}
var err error
lastWriteLocation2, err = store.write(writeData)
if err != nil {
t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
"unexpected error: %s", err)
}
}
// Grab the file number again to later make sure its file no longer exists
fileNumberAfterWriting := store.writeCursor.currentFileNumber
// Rollback
err := store.rollback(currentLocation)
if err != nil {
t.Fatalf("TestFlatFileMultiFileRollback: rollback returned "+
"unexpected error: %s", err)
}
// Make sure that lastWriteLocation1 still exists
expectedData := []byte{4, 4, 4, 4, 4, 4, 4, 4}
data, err := store.read(lastWriteLocation1)
if err != nil {
t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
"unexpected error: %s", err)
}
if !bytes.Equal(data, expectedData) {
t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
"unexpected data. Want: %s, got: %s", string(expectedData),
string(data))
}
// Make sure that lastWriteLocation2 does NOT exist
_, err = store.read(lastWriteLocation2)
if err == nil {
t.Fatalf("TestFlatFileMultiFileRollback: read " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestFlatFileMultiFileRollback: read "+
"returned unexpected error: %s", err)
}
// Make sure that all the appropriate files have been deleted
for i := fileNumberAfterWriting; i > fileNumberBeforeWriting; i-- {
filePath := flatFilePath(store.basePath, store.storeName, i)
if _, err := os.Stat(filePath); err == nil || !os.IsNotExist(err) {
t.Fatalf("TestFlatFileMultiFileRollback: file "+
"unexpectedly still exists: %s", filePath)
}
}
}

View File

@@ -0,0 +1,62 @@
package ff
import (
"bytes"
"encoding/hex"
"reflect"
"strings"
"testing"
)
func TestFlatFileLocationSerialization(t *testing.T) {
location := &flatFileLocation{
fileNumber: 1,
fileOffset: 2,
dataLength: 3,
}
serializedLocation := serializeLocation(location)
expectedSerializedLocation := []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0}
if !bytes.Equal(serializedLocation, expectedSerializedLocation) {
t.Fatalf("TestFlatFileLocationSerialization: serializeLocation "+
"returned unexpected bytes. Want: %s, got: %s",
hex.EncodeToString(expectedSerializedLocation), hex.EncodeToString(serializedLocation))
}
deserializedLocation, err := deserializeLocation(serializedLocation)
if err != nil {
t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+
"unexpectedly failed: %s", err)
}
if !reflect.DeepEqual(deserializedLocation, location) {
t.Fatalf("TestFlatFileLocationSerialization: original "+
"location and deserialized location aren't the same. Want: %v, "+
"got: %v", location, deserializedLocation)
}
}
func TestFlatFileLocationDeserializationErrors(t *testing.T) {
expectedError := "unexpected serializedLocation length"
tooShortSerializedLocation := []byte{0, 1, 2, 3, 4, 5}
_, err := deserializeLocation(tooShortSerializedLocation)
if err == nil {
t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation " +
"unexpectedly succeeded")
}
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+
"returned unexpected error. Want: %s, got: %s", expectedError, err)
}
tooLongSerializedLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
_, err = deserializeLocation(tooLongSerializedLocation)
if err == nil {
t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation " +
"unexpectedly succeeded")
}
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+
"returned unexpected error. Want: %s, got: %s", expectedError, err)
}
}
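
The expected bytes in the test above pin down the on-disk format: three little-endian uint32 fields, fileNumber then fileOffset then dataLength. A hedged sketch of the serialization (the real implementation lives elsewhere in the ff package; encoding/binary is assumed imported):

	// Sketch only; field order and widths are inferred from the
	// expected 12-byte output asserted in the test above.
	func serializeLocationSketch(location *flatFileLocation) []byte {
		serialized := make([]byte, 12)
		binary.LittleEndian.PutUint32(serialized[0:4], location.fileNumber)
		binary.LittleEndian.PutUint32(serialized[4:8], location.fileOffset)
		binary.LittleEndian.PutUint32(serialized[8:12], location.dataLength)
		return serialized
	}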

View File

@@ -36,5 +36,9 @@ func (lf *lockableFile) Close() error {
lf.Lock()
defer lf.Unlock()
if lf.file == nil {
return nil
}
return errors.WithStack(lf.file.Close())
}

View File

@@ -33,10 +33,11 @@ func (s *flatFileStore) read(location *flatFileLocation) ([]byte, error) {
if err != nil {
return nil, err
}
flatFile.RLock()
defer flatFile.RUnlock()
data := make([]byte, location.dataLength)
n, err := flatFile.file.ReadAt(data, int64(location.fileOffset))
flatFile.RUnlock()
if err != nil {
return nil, errors.Wrapf(err, "failed to read data in store '%s' "+
"from file %d, offset %d", s.storeName, location.fileNumber,
@@ -62,43 +63,31 @@ func (s *flatFileStore) read(location *flatFileLocation) ([]byte, error) {
// will also open the file when it's not already open subject to the rules
// described in openFile. Also handles closing files as needed to avoid going
// over the max allowed open files.
//
// NOTE: The returned flat file will already have the read lock acquired and
// the caller MUST call .RUnlock() to release it once it has finished all read
// operations. This is necessary because otherwise it would be possible for a
// separate goroutine to close the file after it is returned from here, but
// before the caller has acquired a read lock.
func (s *flatFileStore) flatFile(fileNumber uint32) (*lockableFile, error) {
// When the requested flat file is open for writes, return it.
s.writeCursor.RLock()
defer s.writeCursor.RUnlock()
if fileNumber == s.writeCursor.currentFileNumber && s.writeCursor.currentFile.file != nil {
openFile := s.writeCursor.currentFile
openFile.RLock()
s.writeCursor.RUnlock()
return openFile, nil
}
s.writeCursor.RUnlock()
// Try to return an open file under the overall files read lock.
s.openFilesMutex.RLock()
defer s.openFilesMutex.RUnlock()
if openFile, ok := s.openFiles[fileNumber]; ok {
s.lruMutex.Lock()
s.openFilesLRU.MoveToFront(s.fileNumberToLRUElement[fileNumber])
s.lruMutex.Unlock()
defer s.lruMutex.Unlock()
s.openFilesLRU.MoveToFront(s.fileNumberToLRUElement[fileNumber])
openFile.RLock()
s.openFilesMutex.RUnlock()
return openFile, nil
}
s.openFilesMutex.RUnlock()
// Since the file isn't open already, need to check the open files map
// again under write lock in case multiple readers got here and a
// separate one is already opening the file.
s.openFilesMutex.Lock()
if openFlatFile, ok := s.openFiles[fileNumber]; ok {
openFlatFile.RLock()
s.openFilesMutex.Unlock()
return openFlatFile, nil
}
@@ -106,11 +95,8 @@ func (s *flatFileStore) flatFile(fileNumber uint32) (*lockableFile, error) {
// recently used one as needed.
openFile, err := s.openFile(fileNumber)
if err != nil {
s.openFilesMutex.Unlock()
return nil, err
}
openFile.RLock()
s.openFilesMutex.Unlock()
return openFile, nil
}
@@ -142,6 +128,7 @@ func (s *flatFileStore) openFile(fileNumber uint32) (*lockableFile, error) {
// recently used list to indicate it is the most recently used file and
// therefore should be closed last.
s.lruMutex.Lock()
defer s.lruMutex.Unlock()
lruList := s.openFilesLRU
if lruList.Len() >= maxOpenFiles {
lruFileNumber := lruList.Remove(lruList.Back()).(uint32)
@@ -151,14 +138,13 @@ func (s *flatFileStore) openFile(fileNumber uint32) (*lockableFile, error) {
// any readers are currently reading from it so it's not closed
// out from under them.
oldFile.Lock()
defer oldFile.Unlock()
_ = oldFile.file.Close()
oldFile.Unlock()
delete(s.openFiles, lruFileNumber)
delete(s.fileNumberToLRUElement, lruFileNumber)
}
s.fileNumberToLRUElement[fileNumber] = lruList.PushFront(fileNumber)
s.lruMutex.Unlock()
// Store a reference to it in the open files map.
s.openFiles[fileNumber] = flatFile
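
Under the contract described in the NOTE above, callers receive the file already read-locked; a minimal caller sketch, mirroring the read method at the top of this file:

	func readSketch(s *flatFileStore, location *flatFileLocation) ([]byte, error) {
		flatFile, err := s.flatFile(location.fileNumber)
		if err != nil {
			return nil, err
		}
		// flatFile comes back with its read lock held; release it once
		// all reads against the underlying file are done.
		defer flatFile.RUnlock()
		data := make([]byte, location.dataLength)
		_, err = flatFile.file.ReadAt(data, int64(location.fileOffset))
		if err != nil {
			return nil, err
		}
		return data, nil
	}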

View File

@@ -64,12 +64,7 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error {
// Close the current write file if it needs to be deleted.
if s.writeCursor.currentFileNumber > targetFileNumber {
s.writeCursor.currentFile.Lock()
if s.writeCursor.currentFile.file != nil {
s.writeCursor.currentFile.file.Close()
s.writeCursor.currentFile.file = nil
}
s.writeCursor.currentFile.Unlock()
s.closeCurrentWriteCursorFile()
}
// Delete all files that are newer than the provided rollback file
@@ -90,10 +85,10 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error {
// Open the file for the current write cursor if needed.
s.writeCursor.currentFile.Lock()
defer s.writeCursor.currentFile.Unlock()
if s.writeCursor.currentFile.file == nil {
openFile, err := s.openWriteFile(s.writeCursor.currentFileNumber)
if err != nil {
s.writeCursor.currentFile.Unlock()
return err
}
s.writeCursor.currentFile.file = openFile
@@ -102,14 +97,12 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error {
// Truncate the file to the provided target offset.
err := s.writeCursor.currentFile.file.Truncate(int64(targetFileOffset))
if err != nil {
s.writeCursor.currentFile.Unlock()
return errors.Wrapf(err, "ROLLBACK: Failed to truncate file %d "+
"in store '%s'", s.writeCursor.currentFileNumber, s.storeName)
}
// Sync the file to disk.
err = s.writeCursor.currentFile.file.Sync()
s.writeCursor.currentFile.Unlock()
if err != nil {
return errors.Wrapf(err, "ROLLBACK: Failed to sync file %d in "+
"store '%s'", s.writeCursor.currentFileNumber, s.storeName)

View File

@@ -1,6 +1,7 @@
package ff
import (
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
"hash/crc32"
"os"
@@ -46,18 +47,16 @@ func (s *flatFileStore) write(data []byte) (*flatFileLocation, error) {
// with LRU tracking. The close is done under the write lock
// for the file to prevent it from being closed out from under
// any readers currently reading from it.
cursor.Lock()
cursor.currentFile.Lock()
if cursor.currentFile.file != nil {
_ = cursor.currentFile.file.Close()
cursor.currentFile.file = nil
}
cursor.currentFile.Unlock()
func() {
cursor.Lock()
defer cursor.Unlock()
// Start writes into next file.
cursor.currentFileNumber++
cursor.currentOffset = 0
cursor.Unlock()
s.closeCurrentWriteCursorFile()
// Start writes into next file.
cursor.currentFileNumber++
cursor.currentOffset = 0
}()
}
// All writes are done under the write lock for the file to ensure any
@@ -154,7 +153,7 @@ func (s *flatFileStore) writeData(data []byte, fieldName string) error {
if err != nil {
var pathErr *os.PathError
if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC {
panic("No space left on the hard disk, exiting...")
panics.Exit(log, "No space left on the hard disk.")
}
return errors.Wrapf(err, "failed to write %s in store %s to file %d "+
"at offset %d", fieldName, s.storeName, cursor.currentFileNumber,
@@ -163,3 +162,15 @@ func (s *flatFileStore) writeData(data []byte, fieldName string) error {
return nil
}
// closeCurrentWriteCursorFile closes the currently open writeCursor file if
// it's open.
// This method MUST be called with the writeCursor lock held for writes.
func (s *flatFileStore) closeCurrentWriteCursorFile() {
s.writeCursor.currentFile.Lock()
defer s.writeCursor.currentFile.Unlock()
if s.writeCursor.currentFile.file != nil {
_ = s.writeCursor.currentFile.file.Close()
s.writeCursor.currentFile.file = nil
}
}

View File

@@ -59,28 +59,28 @@ func (db *ffldb) Close() error {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
// This method is part of the DataAccessor interface.
func (db *ffldb) Put(key []byte, value []byte) error {
func (db *ffldb) Put(key *database.Key, value []byte) error {
return db.levelDB.Put(key, value)
}
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
// This method is part of the DataAccessor interface.
func (db *ffldb) Get(key []byte) ([]byte, error) {
func (db *ffldb) Get(key *database.Key) ([]byte, error) {
return db.levelDB.Get(key)
}
// Has returns true if the database does contains the
// given key.
// This method is part of the DataAccessor interface.
func (db *ffldb) Has(key []byte) (bool, error) {
func (db *ffldb) Has(key *database.Key) (bool, error) {
return db.levelDB.Has(key)
}
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
// This method is part of the DataAccessor interface.
func (db *ffldb) Delete(key []byte) error {
func (db *ffldb) Delete(key *database.Key) error {
return db.levelDB.Delete(key)
}
@@ -155,7 +155,7 @@ func (db *ffldb) RetrieveFromStore(storeName string, location []byte) ([]byte, e
// Cursor begins a new cursor over the given bucket.
// This method is part of the DataAccessor interface.
func (db *ffldb) Cursor(bucket []byte) (database.Cursor, error) {
func (db *ffldb) Cursor(bucket *database.Bucket) (database.Cursor, error) {
ldbCursor := db.levelDB.Cursor(bucket)
return ldbCursor, nil
@@ -170,8 +170,9 @@ func (db *ffldb) Begin() (database.Transaction, error) {
}
transaction := &transaction{
ldbTx: ldbTx,
ffdb: db.flatFileDB,
ldbTx: ldbTx,
ffdb: db.flatFileDB,
isClosed: false,
}
return transaction, nil
}

View File

@@ -7,6 +7,28 @@ import (
"testing"
)
func prepareDatabaseForTest(t *testing.T, testName string) (db database.Database, teardownFunc func()) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("%s: TempDir unexpectedly "+
"failed: %s", testName, err)
}
db, err = Open(path)
if err != nil {
t.Fatalf("%s: Open unexpectedly "+
"failed: %s", testName, err)
}
teardownFunc = func() {
err = db.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
}
return db, teardownFunc
}
func TestRepairFlatFiles(t *testing.T) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", "TestRepairFlatFiles")

View File

@@ -18,8 +18,7 @@ func (db *ffldb) initialize() error {
}
func (db *ffldb) flatFiles() (map[string][]byte, error) {
flatFilesBucketPath := flatFilesBucket.Path()
flatFilesCursor := db.levelDB.Cursor(flatFilesBucketPath)
flatFilesCursor := db.levelDB.Cursor(flatFilesBucket)
defer func() {
err := flatFilesCursor.Close()
if err != nil {
@@ -33,7 +32,7 @@ func (db *ffldb) flatFiles() (map[string][]byte, error) {
if err != nil {
return nil, err
}
storeName := string(storeNameKey)
storeName := string(storeNameKey.Suffix())
currentLocation, err := flatFilesCursor.Value()
if err != nil {

View File

@@ -2,7 +2,7 @@ package ldb
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/database"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -12,35 +12,35 @@ import (
// LevelDBCursor is a thin wrapper around native leveldb iterators.
type LevelDBCursor struct {
ldbIterator iterator.Iterator
prefix []byte
bucket *database.Bucket
isClosed bool
}
// Cursor begins a new cursor over the given prefix.
func (db *LevelDB) Cursor(prefix []byte) *LevelDBCursor {
ldbIterator := db.ldb.NewIterator(util.BytesPrefix(prefix), nil)
func (db *LevelDB) Cursor(bucket *database.Bucket) *LevelDBCursor {
ldbIterator := db.ldb.NewIterator(util.BytesPrefix(bucket.Path()), nil)
return &LevelDBCursor{
ldbIterator: ldbIterator,
prefix: prefix,
bucket: bucket,
isClosed: false,
}
}
// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted. Returns false if the cursor is closed.
// iterator is exhausted. Panics if the cursor is closed.
func (c *LevelDBCursor) Next() bool {
if c.isClosed {
return false
panic("cannot call next on a closed cursor")
}
return c.ldbIterator.Next()
}
// First moves the iterator to the first key/value pair. It returns false if
// such a pair does not exist or if the cursor is closed.
// such a pair does not exist. Panics if the cursor is closed.
func (c *LevelDBCursor) First() bool {
if c.isClosed {
return false
panic("cannot call first on a closed cursor")
}
return c.ldbIterator.First()
}
@@ -48,25 +48,20 @@ func (c *LevelDBCursor) First() bool {
// Seek moves the iterator to the key/value pair whose key exactly
// matches the given key. It returns ErrNotFound if such a pair does
// not exist.
func (c *LevelDBCursor) Seek(key []byte) error {
func (c *LevelDBCursor) Seek(key *database.Key) error {
if c.isClosed {
return errors.New("cannot seek a closed cursor")
}
notFoundErr := errors.Wrapf(database.ErrNotFound, "key %s not "+
"found", hex.EncodeToString(key))
found := c.ldbIterator.Seek(key)
found := c.ldbIterator.Seek(key.Bytes())
if !found {
return notFoundErr
return errors.Wrapf(database.ErrNotFound, "key %s not found", key)
}
// Use c.ldbIterator.Key because c.Key removes the prefix from the key
currentKey := c.ldbIterator.Key()
if currentKey == nil {
return notFoundErr
}
if !bytes.Equal(currentKey, key) {
return notFoundErr
if currentKey == nil || !bytes.Equal(currentKey, key.Bytes()) {
return errors.Wrapf(database.ErrNotFound, "key %s not found", key)
}
return nil
@@ -76,17 +71,17 @@ func (c *LevelDBCursor) Seek(key []byte) error {
// Note that the key is trimmed to not include the prefix the cursor was opened
// with. The caller should not modify the contents of the returned slice, and
// its contents may change on the next call to Next.
func (c *LevelDBCursor) Key() ([]byte, error) {
func (c *LevelDBCursor) Key() (*database.Key, error) {
if c.isClosed {
return nil, errors.New("cannot get the key of a closed cursor")
}
fullKeyPath := c.ldbIterator.Key()
if fullKeyPath == nil {
return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+
"key of a done cursor")
"key of an exhausted cursor")
}
key := bytes.TrimPrefix(fullKeyPath, c.prefix)
return key, nil
suffix := bytes.TrimPrefix(fullKeyPath, c.bucket.Path())
return c.bucket.Key(suffix), nil
}
// Value returns the value of the current key/value pair, or ErrNotFound if done.
@@ -99,7 +94,7 @@ func (c *LevelDBCursor) Value() ([]byte, error) {
value := c.ldbIterator.Value()
if value == nil {
return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+
"value of a done cursor")
"value of an exhausted cursor")
}
return value, nil
}

View File

@@ -0,0 +1,246 @@
package ldb
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/database"
"reflect"
"strings"
"testing"
)
func validateCurrentCursorKeyAndValue(t *testing.T, testName string, cursor *LevelDBCursor,
expectedKey *database.Key, expectedValue []byte) {
cursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key "+
"unexpectedly failed: %s", testName, err)
}
if !reflect.DeepEqual(cursorKey, expectedKey) {
t.Fatalf("%s: Key "+
"returned wrong key. Want: %s, got: %s",
testName, string(expectedKey.Bytes()), string(cursorKey.Bytes()))
}
cursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value "+
"unexpectedly failed for key %s: %s",
testName, cursorKey, err)
}
if !bytes.Equal(cursorValue, expectedValue) {
t.Fatalf("%s: Value "+
"returned wrong value for key %s. Want: %s, got: %s",
testName, cursorKey, string(expectedValue), string(cursorValue))
}
}
func recoverFromClosedCursorPanic(t *testing.T, testName string) {
panicErr := recover()
if panicErr == nil {
t.Fatalf("%s: cursor unexpectedly "+
"didn't panic after being closed", testName)
}
expectedPanicErr := "closed cursor"
if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) {
t.Fatalf("%s: cursor panicked "+
"with wrong message. Want: %v, got: %s",
testName, expectedPanicErr, panicErr)
}
}
// TestCursorSanity validates typical cursor usage, including
// opening a cursor over some existing data, seeking back
// and forth over that data, and getting some keys/values out
// of the cursor.
func TestCursorSanity(t *testing.T) {
ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorSanity")
defer teardownFunc()
// Write some data to the database
bucket := database.MakeBucket([]byte("bucket"))
for i := 0; i < 10; i++ {
key := fmt.Sprintf("key%d", i)
value := fmt.Sprintf("value%d", i)
err := ldb.Put(bucket.Key([]byte(key)), []byte(value))
if err != nil {
t.Fatalf("TestCursorSanity: Put "+
"unexpectedly failed: %s", err)
}
}
// Open a new cursor
cursor := ldb.Cursor(bucket)
defer func() {
err := cursor.Close()
if err != nil {
t.Fatalf("TestCursorSanity: Close "+
"unexpectedly failed: %s", err)
}
}()
// Seek to first key and make sure its key and value are correct
hasNext := cursor.First()
if !hasNext {
t.Fatalf("TestCursorSanity: First " +
"unexpectedly returned non-existance")
}
expectedKey := bucket.Key([]byte("key0"))
expectedValue := []byte("value0")
validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue)
// Seek to a non-existent key
err := cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("TestCursorSanity: Seek " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestCursorSanity: Seek "+
"returned wrong error: %s", err)
}
// Seek to the last key
err = cursor.Seek(bucket.Key([]byte("key9")))
if err != nil {
t.Fatalf("TestCursorSanity: Seek "+
"unexpectedly failed: %s", err)
}
expectedKey = bucket.Key([]byte("key9"))
expectedValue = []byte("value9")
validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue)
// Call Next to get to the end of the cursor. This should
// return false to signify that there are no items after that.
// Key and Value calls should return ErrNotFound.
hasNext = cursor.Next()
if hasNext {
t.Fatalf("TestCursorSanity: Next " +
"after last value is unexpectedly not done")
}
_, err = cursor.Key()
if err == nil {
t.Fatalf("TestCursorSanity: Key " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestCursorSanity: Key "+
"returned wrong error: %s", err)
}
_, err = cursor.Value()
if err == nil {
t.Fatalf("TestCursorSanity: Value " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestCursorSanity: Value "+
"returned wrong error: %s", err)
}
}
func TestCursorCloseErrors(t *testing.T) {
tests := []struct {
name string
// function is the LevelDBCursor function that we're
// verifying returns an error after the cursor had
// been closed.
function func(dbTx *LevelDBCursor) error
}{
{
name: "Seek",
function: func(cursor *LevelDBCursor) error {
return cursor.Seek(database.MakeBucket().Key([]byte{}))
},
},
{
name: "Key",
function: func(cursor *LevelDBCursor) error {
_, err := cursor.Key()
return err
},
},
{
name: "Value",
function: func(cursor *LevelDBCursor) error {
_, err := cursor.Value()
return err
},
},
{
name: "Close",
function: func(cursor *LevelDBCursor) error {
return cursor.Close()
},
},
}
for _, test := range tests {
func() {
ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseErrors")
defer teardownFunc()
// Open a new cursor
cursor := ldb.Cursor(database.MakeBucket())
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("TestCursorCloseErrors: Close "+
"unexpectedly failed: %s", err)
}
expectedErrContainsString := "closed cursor"
// Make sure that the test function returns a "closed transaction" error
err = test.function(cursor)
if err == nil {
t.Fatalf("TestCursorCloseErrors: %s "+
"unexpectedly succeeded", test.name)
}
if !strings.Contains(err.Error(), expectedErrContainsString) {
t.Fatalf("TestCursorCloseErrors: %s "+
"returned wrong error. Want: %s, got: %s",
test.name, expectedErrContainsString, err)
}
}()
}
}
func TestCursorCloseFirstAndNext(t *testing.T) {
ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseFirstAndNext")
defer teardownFunc()
// Write some data to the database
for i := 0; i < 10; i++ {
key := fmt.Sprintf("key%d", i)
value := fmt.Sprintf("value%d", i)
err := ldb.Put(database.MakeBucket([]byte("bucket")).Key([]byte(key)), []byte(value))
if err != nil {
t.Fatalf("TestCursorCloseFirstAndNext: Put "+
"unexpectedly failed: %s", err)
}
}
// Open a new cursor
cursor := ldb.Cursor(database.MakeBucket([]byte("bucket")))
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("TestCursorCloseFirstAndNext: Close "+
"unexpectedly failed: %s", err)
}
// We expect First to panic
func() {
defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext")
cursor.First()
}()
// We expect Next to panic
func() {
defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext")
cursor.Next()
}()
}

View File

@@ -1,7 +1,6 @@
package ldb
import (
"encoding/hex"
"github.com/kaspanet/kaspad/database"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb"
@@ -16,7 +15,7 @@ type LevelDB struct {
// NewLevelDB opens a leveldb instance defined by the given path.
func NewLevelDB(path string) (*LevelDB, error) {
// Open leveldb. If it doesn't exist, create it.
ldb, err := leveldb.OpenFile(path, nil)
ldb, err := leveldb.OpenFile(path, Options())
// If the database is corrupted, attempt to recover.
if _, corrupted := err.(*ldbErrors.ErrCorrupted); corrupted {
@@ -52,19 +51,19 @@ func (db *LevelDB) Close() error {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
func (db *LevelDB) Put(key []byte, value []byte) error {
err := db.ldb.Put(key, value, nil)
func (db *LevelDB) Put(key *database.Key, value []byte) error {
err := db.ldb.Put(key.Bytes(), value, nil)
return errors.WithStack(err)
}
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
func (db *LevelDB) Get(key []byte) ([]byte, error) {
data, err := db.ldb.Get(key, nil)
func (db *LevelDB) Get(key *database.Key) ([]byte, error) {
data, err := db.ldb.Get(key.Bytes(), nil)
if err != nil {
if errors.Is(err, leveldb.ErrNotFound) {
return nil, errors.Wrapf(database.ErrNotFound,
"key %s not found", hex.EncodeToString(key))
"key %s not found", key)
}
return nil, errors.WithStack(err)
}
@@ -73,8 +72,8 @@ func (db *LevelDB) Get(key []byte) ([]byte, error) {
// Has returns true if the database contains the
// given key.
func (db *LevelDB) Has(key []byte) (bool, error) {
exists, err := db.ldb.Has(key, nil)
func (db *LevelDB) Has(key *database.Key) (bool, error) {
exists, err := db.ldb.Has(key.Bytes(), nil)
if err != nil {
return false, errors.WithStack(err)
}
@@ -83,7 +82,7 @@ func (db *LevelDB) Has(key []byte) (bool, error) {
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
func (db *LevelDB) Delete(key []byte) error {
err := db.ldb.Delete(key, nil)
func (db *LevelDB) Delete(key *database.Key) error {
err := db.ldb.Delete(key.Bytes(), nil)
return errors.WithStack(err)
}
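
Formatting keys directly with %s here (and in the transaction code later in this diff) suggests that *database.Key implements fmt.Stringer. That method is not part of this diff; a plausible sketch, hex-encoding the key the way the replaced hex.EncodeToString call did:

	// Hypothetical sketch; the real method lives in the database package.
	func (k *Key) String() string {
		return hex.EncodeToString(k.Bytes())
	}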

View File

@@ -7,30 +7,36 @@ import (
"testing"
)
func TestLevelDBSanity(t *testing.T) {
// Open a test db
path, err := ioutil.TempDir("", "TestLevelDBSanity")
func prepareDatabaseForTest(t *testing.T, testName string) (ldb *LevelDB, teardownFunc func()) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("TestLevelDBSanity: TempDir unexpectedly "+
"failed: %s", err)
t.Fatalf("%s: TempDir unexpectedly "+
"failed: %s", testName, err)
}
ldb, err := NewLevelDB(path)
ldb, err = NewLevelDB(path)
if err != nil {
t.Fatalf("TestLevelDBSanity: NewLevelDB "+
"unexpectedly failed: %s", err)
t.Fatalf("%s: NewLevelDB unexpectedly "+
"failed: %s", testName, err)
}
defer func() {
err := ldb.Close()
teardownFunc = func() {
err = ldb.Close()
if err != nil {
t.Fatalf("TestLevelDBSanity: Close "+
"unexpectedly failed: %s", err)
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
}()
}
return ldb, teardownFunc
}
func TestLevelDBSanity(t *testing.T) {
ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBSanity")
defer teardownFunc()
// Put something into the db
key := []byte("key")
key := database.MakeBucket().Key([]byte("key"))
putData := []byte("Hello world!")
err = ldb.Put(key, putData)
err := ldb.Put(key, putData)
if err != nil {
t.Fatalf("TestLevelDBSanity: Put returned "+
"unexpected error: %s", err)
@@ -52,24 +58,8 @@ func TestLevelDBSanity(t *testing.T) {
}
func TestLevelDBTransactionSanity(t *testing.T) {
// Open a test db
path, err := ioutil.TempDir("", "TestLevelDBTransactionSanity")
if err != nil {
t.Fatalf("TestLevelDBTransactionSanity: TempDir unexpectedly "+
"failed: %s", err)
}
ldb, err := NewLevelDB(path)
if err != nil {
t.Fatalf("TestLevelDBTransactionSanity: NewLevelDB "+
"unexpectedly failed: %s", err)
}
defer func() {
err := ldb.Close()
if err != nil {
t.Fatalf("TestLevelDBTransactionSanity: Close "+
"unexpectedly failed: %s", err)
}
}()
ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBTransactionSanity")
defer teardownFunc()
// Case 1. Write in tx and then read directly from the DB
// Begin a new transaction
@@ -80,7 +70,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
}
// Put something into the transaction
key := []byte("key")
key := database.MakeBucket().Key([]byte("key"))
putData := []byte("Hello world!")
err = tx.Put(key, putData)
if err != nil {
@@ -124,7 +114,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
// Case 2. Write directly to the DB and then read from a tx
// Put something into the db
key = []byte("key2")
key = database.MakeBucket().Key([]byte("key2"))
putData = []byte("Goodbye world!")
err = ldb.Put(key, putData)
if err != nil {

View File

@@ -0,0 +1,19 @@
package ldb
import "github.com/syndtr/goleveldb/leveldb/opt"
var (
defaultOptions = opt.Options{
Compression: opt.NoCompression,
BlockCacheCapacity: 256 * opt.MiB,
WriteBuffer: 128 * opt.MiB,
DisableSeeksCompaction: true,
}
// Options is a function that returns a leveldb
// opt.Options struct for opening a database.
// It's defined as a variable for the sake of testing.
Options = func() *opt.Options {
return &defaultOptions
}
)
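
Since Options is a var, tests can substitute their own options; a hedged sketch of an override (withSmallWriteBufferSketch is an illustrative name, not part of this diff):

	func withSmallWriteBufferSketch(testFunc func()) {
		originalOptions := Options
		Options = func() *opt.Options {
			// Copy the defaults and shrink the write buffer for the test.
			options := defaultOptions
			options.WriteBuffer = opt.MiB
			return &options
		}
		defer func() { Options = originalOptions }()
		testFunc()
	}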

View File

@@ -1,7 +1,6 @@
package ldb
import (
"encoding/hex"
"github.com/kaspanet/kaspad/database"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb"
@@ -54,7 +53,7 @@ func (tx *LevelDBTransaction) Commit() error {
tx.isClosed = true
tx.snapshot.Release()
return tx.db.ldb.Write(tx.batch, nil)
return errors.WithStack(tx.db.ldb.Write(tx.batch, nil))
}
// Rollback rolls back whatever changes were made to the
@@ -82,27 +81,27 @@ func (tx *LevelDBTransaction) RollbackUnlessClosed() error {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
func (tx *LevelDBTransaction) Put(key []byte, value []byte) error {
func (tx *LevelDBTransaction) Put(key *database.Key, value []byte) error {
if tx.isClosed {
return errors.New("cannot put into a closed transaction")
}
tx.batch.Put(key, value)
tx.batch.Put(key.Bytes(), value)
return nil
}
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
func (tx *LevelDBTransaction) Get(key []byte) ([]byte, error) {
func (tx *LevelDBTransaction) Get(key *database.Key) ([]byte, error) {
if tx.isClosed {
return nil, errors.New("cannot get from a closed transaction")
}
data, err := tx.snapshot.Get(key, nil)
data, err := tx.snapshot.Get(key.Bytes(), nil)
if err != nil {
if errors.Is(err, leveldb.ErrNotFound) {
return nil, errors.Wrapf(database.ErrNotFound,
"key %s not found", hex.EncodeToString(key))
"key %s not found", key)
}
return nil, errors.WithStack(err)
}
@@ -111,27 +110,28 @@ func (tx *LevelDBTransaction) Get(key []byte) ([]byte, error) {
// Has returns true if the database contains the
// given key.
func (tx *LevelDBTransaction) Has(key []byte) (bool, error) {
func (tx *LevelDBTransaction) Has(key *database.Key) (bool, error) {
if tx.isClosed {
return false, errors.New("cannot has from a closed transaction")
}
return tx.snapshot.Has(key, nil)
res, err := tx.snapshot.Has(key.Bytes(), nil)
return res, errors.WithStack(err)
}
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
func (tx *LevelDBTransaction) Delete(key []byte) error {
func (tx *LevelDBTransaction) Delete(key *database.Key) error {
if tx.isClosed {
return errors.New("cannot delete from a closed transaction")
}
tx.batch.Delete(key)
tx.batch.Delete(key.Bytes())
return nil
}
// Cursor begins a new cursor over the given bucket.
func (tx *LevelDBTransaction) Cursor(bucket []byte) (*LevelDBCursor, error) {
func (tx *LevelDBTransaction) Cursor(bucket *database.Bucket) (*LevelDBCursor, error) {
if tx.isClosed {
return nil, errors.New("cannot open a cursor from a closed transaction")
}
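
Note the split that makes the consistency caveat on the ffldb transaction wrapper (further down in this diff) concrete: Put and Delete only accumulate changes in tx.batch, while Get and Has read from the tx.snapshot taken when the transaction began. The batch is written to leveldb only on Commit, so a value put within a transaction is not guaranteed to be visible to a Get in that same transaction.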

View File

@@ -0,0 +1,146 @@
package ldb
import (
"github.com/kaspanet/kaspad/database"
"strings"
"testing"
)
func TestTransactionCloseErrors(t *testing.T) {
tests := []struct {
name string
// function is the LevelDBTransaction function that
// we're verifying whether it returns an error after
// the transaction had been closed.
function func(dbTx *LevelDBTransaction) error
shouldReturnError bool
}{
{
name: "Put",
function: func(dbTx *LevelDBTransaction) error {
return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value"))
},
shouldReturnError: true,
},
{
name: "Get",
function: func(dbTx *LevelDBTransaction) error {
_, err := dbTx.Get(database.MakeBucket().Key([]byte("key")))
return err
},
shouldReturnError: true,
},
{
name: "Has",
function: func(dbTx *LevelDBTransaction) error {
_, err := dbTx.Has(database.MakeBucket().Key([]byte("key")))
return err
},
shouldReturnError: true,
},
{
name: "Delete",
function: func(dbTx *LevelDBTransaction) error {
return dbTx.Delete(database.MakeBucket().Key([]byte("key")))
},
shouldReturnError: true,
},
{
name: "Cursor",
function: func(dbTx *LevelDBTransaction) error {
_, err := dbTx.Cursor(database.MakeBucket([]byte("bucket")))
return err
},
shouldReturnError: true,
},
{
name: "Rollback",
function: (*LevelDBTransaction).Rollback,
shouldReturnError: true,
},
{
name: "Commit",
function: (*LevelDBTransaction).Commit,
shouldReturnError: true,
},
{
name: "RollbackUnlessClosed",
function: (*LevelDBTransaction).RollbackUnlessClosed,
shouldReturnError: false,
},
}
for _, test := range tests {
func() {
ldb, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCloseErrors")
defer teardownFunc()
// Begin a new transaction to test Commit
commitTx, err := ldb.Begin()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := commitTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Commit the Commit test transaction
err = commitTx.Commit()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Commit "+
"unexpectedly failed: %s", err)
}
// Begin a new transaction to test Rollback
rollbackTx, err := ldb.Begin()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := rollbackTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Rollback the Rollback test transaction
err = rollbackTx.Rollback()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Rollback "+
"unexpectedly failed: %s", err)
}
expectedErrContainsString := "closed transaction"
// Make sure that the test function returns a "closed transaction" error
// for both the commitTx and the rollbackTx
for _, closedTx := range []*LevelDBTransaction{commitTx, rollbackTx} {
err = test.function(closedTx)
if test.shouldReturnError {
if err == nil {
t.Fatalf("TestTransactionCloseErrors: %s "+
"unexpectedly succeeded", test.name)
}
if !strings.Contains(err.Error(), expectedErrContainsString) {
t.Fatalf("TestTransactionCloseErrors: %s "+
"returned wrong error. Want: %s, got: %s",
test.name, expectedErrContainsString, err)
}
} else {
if err != nil {
t.Fatalf("TestTransactionCloseErrors: %s "+
"unexpectedly failed: %s", test.name, err)
}
}
}
}()
}
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/database/ffldb/ff"
"github.com/kaspanet/kaspad/database/ffldb/ldb"
"github.com/pkg/errors"
)
// transaction is an ffldb transaction.
@@ -13,35 +14,52 @@ import (
// NO guarantee that if one puts data into the transaction then
// it will be available to get within the same transaction.
type transaction struct {
ldbTx *ldb.LevelDBTransaction
ffdb *ff.FlatFileDB
ldbTx *ldb.LevelDBTransaction
ffdb *ff.FlatFileDB
isClosed bool
}
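
Given the consistency caveat above, a hedged sketch of typical usage (it mirrors the commit/rollback tests later in this diff; `db` is any database.Database):

	func putInTransactionSketch(db database.Database) error {
		dbTx, err := db.Begin()
		if err != nil {
			return err
		}
		// Release the transaction on any early return; this is a no-op
		// once Commit or Rollback has already closed it.
		defer dbTx.RollbackUnlessClosed()

		key := database.MakeBucket().Key([]byte("key"))
		if err := dbTx.Put(key, []byte("value")); err != nil {
			return err
		}
		// NOTE: per the caveat above, dbTx.Get(key) here is NOT
		// guaranteed to see the Put.
		if err := dbTx.Commit(); err != nil {
			return err
		}
		// After Commit the value is visible to direct reads.
		_, err = db.Get(key)
		return err
	}
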
// Put sets the value for the given key. It overwrites
// any previous value for that key.
// This method is part of the DataAccessor interface.
func (tx *transaction) Put(key []byte, value []byte) error {
func (tx *transaction) Put(key *database.Key, value []byte) error {
if tx.isClosed {
return errors.New("cannot put into a closed transaction")
}
return tx.ldbTx.Put(key, value)
}
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
// This method is part of the DataAccessor interface.
func (tx *transaction) Get(key []byte) ([]byte, error) {
func (tx *transaction) Get(key *database.Key) ([]byte, error) {
if tx.isClosed {
return nil, errors.New("cannot get from a closed transaction")
}
return tx.ldbTx.Get(key)
}
// Has returns true if the database contains the
// given key.
// This method is part of the DataAccessor interface.
func (tx *transaction) Has(key []byte) (bool, error) {
func (tx *transaction) Has(key *database.Key) (bool, error) {
if tx.isClosed {
return false, errors.New("cannot has from a closed transaction")
}
return tx.ldbTx.Has(key)
}
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
// This method is part of the DataAccessor interface.
func (tx *transaction) Delete(key []byte) error {
func (tx *transaction) Delete(key *database.Key) error {
if tx.isClosed {
return errors.New("cannot delete from a closed transaction")
}
return tx.ldbTx.Delete(key)
}
@@ -52,6 +70,10 @@ func (tx *transaction) Delete(key []byte) error {
// that has just now been inserted.
// This method is part of the DataAccessor interface.
func (tx *transaction) AppendToStore(storeName string, data []byte) ([]byte, error) {
if tx.isClosed {
return nil, errors.New("cannot append to store on a closed transaction")
}
return appendToStore(tx, tx.ffdb, storeName, data)
}
@@ -61,12 +83,20 @@ func (tx *transaction) AppendToStore(storeName string, data []byte) ([]byte, err
// AppendToStore for further details.
// This method is part of the DataAccessor interface.
func (tx *transaction) RetrieveFromStore(storeName string, location []byte) ([]byte, error) {
if tx.isClosed {
return nil, errors.New("cannot retrieve from store on a closed transaction")
}
return tx.ffdb.Read(storeName, location)
}
// Cursor begins a new cursor over the given bucket.
// This method is part of the DataAccessor interface.
func (tx *transaction) Cursor(bucket []byte) (database.Cursor, error) {
func (tx *transaction) Cursor(bucket *database.Bucket) (database.Cursor, error) {
if tx.isClosed {
return nil, errors.New("cannot open a cursor from a closed transaction")
}
return tx.ldbTx.Cursor(bucket)
}
@@ -74,6 +104,11 @@ func (tx *transaction) Cursor(bucket []byte) (database.Cursor, error) {
// database within this transaction.
// This method is part of the Transaction interface.
func (tx *transaction) Rollback() error {
if tx.isClosed {
return errors.New("cannot rollback a closed transaction")
}
tx.isClosed = true
return tx.ldbTx.Rollback()
}
@@ -81,6 +116,11 @@ func (tx *transaction) Rollback() error {
// within this transaction.
// This method is part of the Transaction interface.
func (tx *transaction) Commit() error {
if tx.isClosed {
return errors.New("cannot commit a closed transaction")
}
tx.isClosed = true
return tx.ldbTx.Commit()
}
@@ -88,5 +128,10 @@ func (tx *transaction) Commit() error {
// the database within the transaction, unless the transaction
// had already been closed using either Rollback or Commit.
func (tx *transaction) RollbackUnlessClosed() error {
if tx.isClosed {
return nil
}
tx.isClosed = true
return tx.ldbTx.RollbackUnlessClosed()
}

View File

@@ -0,0 +1,500 @@
package ffldb
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"strings"
"testing"
)
func TestTransactionCommitForLevelDBMethods(t *testing.T) {
db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCommitForLevelDBMethods")
defer teardownFunc()
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Put "+
"unexpectedly failed: %s", err)
}
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Make sure that Has returns that the original value exists
exists, err := dbTx.Has(key1)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if !exists {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " +
"unexpectedly returned that the value does not exist")
}
// Get the existing value and make sure it's equal to the original
existingValue, err := dbTx.Get(key1)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(existingValue, value1) {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"returned unexpected value. Want: %s, got: %s",
string(value1), string(existingValue))
}
// Delete the existing value
err = dbTx.Delete(key1)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Delete "+
"unexpectedly failed: %s", err)
}
// Try to get a value that does not exist and make sure it returns ErrNotFound
_, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"returned unexpected error: %s", err)
}
// Put a new value
key2 := database.MakeBucket().Key([]byte("key2"))
value2 := []byte("value2")
err = dbTx.Put(key2, value2)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Put "+
"unexpectedly failed: %s", err)
}
// Commit the transaction
err = dbTx.Commit()
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Commit "+
"unexpectedly failed: %s", err)
}
// Make sure that Has returns that the original value does NOT exist
exists, err = db.Has(key1)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if exists {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " +
"unexpectedly returned that the value exists")
}
// Try to Get the existing value and make sure an ErrNotFound is returned
_, err = db.Get(key1)
if err == nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"returned unexpected err: %s", err)
}
// Make sure that Has returns that the new value exists
exists, err = db.Has(key2)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if !exists {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " +
"unexpectedly returned that the value does not exist")
}
// Get the new value and make sure it's equal to the original
existingValue, err = db.Get(key2)
if err != nil {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(existingValue, value2) {
t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+
"returned unexpected value. Want: %s, got: %s",
string(value2), string(existingValue))
}
}
func TestTransactionRollbackForLevelDBMethods(t *testing.T) {
db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionRollbackForLevelDBMethods")
defer teardownFunc()
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Put "+
"unexpectedly failed: %s", err)
}
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Make sure that Has returns that the original value exists
exists, err := dbTx.Has(key1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if !exists {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " +
"unexpectedly returned that the value does not exist")
}
// Get the existing value and make sure it's equal to the original
existingValue, err := dbTx.Get(key1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(existingValue, value1) {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+
"returned unexpected value. Want: %s, got: %s",
string(value1), string(existingValue))
}
// Delete the existing value
err = dbTx.Delete(key1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Delete "+
"unexpectedly failed: %s", err)
}
// Put a new value
key2 := database.MakeBucket().Key([]byte("key2"))
value2 := []byte("value2")
err = dbTx.Put(key2, value2)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Put "+
"unexpectedly failed: %s", err)
}
// Rollback the transaction
err = dbTx.Rollback()
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Rollback "+
"unexpectedly failed: %s", err)
}
// Make sure that Has returns that the original value still exists
exists, err = db.Has(key1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if !exists {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " +
"unexpectedly returned that the value does not exist")
}
// Get the existing value and make sure it is still returned
existingValue, err = db.Get(key1)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(existingValue, value1) {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+
"returned unexpected value. Want: %s, got: %s",
string(value1), string(existingValue))
}
// Make sure that Has returns that the new value does NOT exist
exists, err = db.Has(key2)
if err != nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+
"unexpectedly failed: %s", err)
}
if exists {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " +
"unexpectedly returned that the value exists")
}
// Try to Get the new value and make sure it returns an ErrNotFound
_, err = db.Get(key2)
if err == nil {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get " +
"unexpectedly succeeded")
}
if !database.IsNotFoundError(err) {
t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+
"returned unexpected error: %s", err)
}
}
func TestTransactionCloseErrors(t *testing.T) {
tests := []struct {
name string
function func(dbTx database.Transaction) error
shouldReturnError bool
}{
{
name: "Put",
function: func(dbTx database.Transaction) error {
return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value"))
},
shouldReturnError: true,
},
{
name: "Get",
function: func(dbTx database.Transaction) error {
_, err := dbTx.Get(database.MakeBucket().Key([]byte("key")))
return err
},
shouldReturnError: true,
},
{
name: "Has",
function: func(dbTx database.Transaction) error {
_, err := dbTx.Has(database.MakeBucket().Key([]byte("key")))
return err
},
shouldReturnError: true,
},
{
name: "Delete",
function: func(dbTx database.Transaction) error {
return dbTx.Delete(database.MakeBucket().Key([]byte("key")))
},
shouldReturnError: true,
},
{
name: "Cursor",
function: func(dbTx database.Transaction) error {
_, err := dbTx.Cursor(database.MakeBucket([]byte("bucket")))
return err
},
shouldReturnError: true,
},
{
name: "AppendToStore",
function: func(dbTx database.Transaction) error {
_, err := dbTx.AppendToStore("store", []byte("data"))
return err
},
shouldReturnError: true,
},
{
name: "RetrieveFromStore",
function: func(dbTx database.Transaction) error {
_, err := dbTx.RetrieveFromStore("store", []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
return err
},
shouldReturnError: true,
},
{
name: "Rollback",
function: func(dbTx database.Transaction) error {
return dbTx.Rollback()
},
shouldReturnError: true,
},
{
name: "Commit",
function: func(dbTx database.Transaction) error {
return dbTx.Commit()
},
shouldReturnError: true,
},
{
name: "RollbackUnlessClosed",
function: func(dbTx database.Transaction) error {
return dbTx.RollbackUnlessClosed()
},
shouldReturnError: false,
},
}
for _, test := range tests {
func() {
db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCloseErrors")
defer teardownFunc()
// Begin a new transaction to test Commit
commitTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := commitTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Commit the Commit test transaction
err = commitTx.Commit()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Commit "+
"unexpectedly failed: %s", err)
}
// Begin a new transaction to test Rollback
rollbackTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := rollbackTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Rollback the Rollback test transaction
err = rollbackTx.Rollback()
if err != nil {
t.Fatalf("TestTransactionCloseErrors: Rollback "+
"unexpectedly failed: %s", err)
}
expectedErrContainsString := "closed transaction"
// Make sure that the test function returns a "closed transaction" error
// for both the commitTx and the rollbackTx
for _, closedTx := range []database.Transaction{commitTx, rollbackTx} {
err = test.function(closedTx)
if test.shouldReturnError {
if err == nil {
t.Fatalf("TestTransactionCloseErrors: %s "+
"unexpectedly succeeded", test.name)
}
if !strings.Contains(err.Error(), expectedErrContainsString) {
t.Fatalf("TestTransactionCloseErrors: %s "+
"returned wrong error. Want: %s, got: %s",
test.name, expectedErrContainsString, err)
}
} else {
if err != nil {
t.Fatalf("TestTransactionCloseErrors: %s "+
"unexpectedly failed: %s", test.name, err)
}
}
}
}()
}
}
func TestTransactionRollbackUnlessClosed(t *testing.T) {
db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionRollbackUnlessClosed")
defer teardownFunc()
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionRollbackUnlessClosed: Begin "+
"unexpectedly failed: %s", err)
}
// Roll it back
err = dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionRollbackUnlessClosed: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}
func TestTransactionCommitForFlatFileMethods(t *testing.T) {
db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCommitForFlatFileMethods")
defer teardownFunc()
// Put a value into the database
store := "store"
value1 := []byte("value1")
location1, err := db.AppendToStore(store, value1)
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: AppendToStore "+
"unexpectedly failed: %s", err)
}
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: Begin "+
"unexpectedly failed: %s", err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: RollbackUnlessClosed "+
"unexpectedly failed: %s", err)
}
}()
// Retrieve the existing value and make sure it's equal to the original
existingValue, err := dbTx.RetrieveFromStore(store, location1)
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(existingValue, value1) {
t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+
"returned unexpected value. Want: %s, got: %s",
string(value1), string(existingValue))
}
// Put a new value
value2 := []byte("value2")
location2, err := dbTx.AppendToStore(store, value2)
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: AppendToStore "+
"unexpectedly failed: %s", err)
}
// Commit the transaction
err = dbTx.Commit()
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: Commit "+
"unexpectedly failed: %s", err)
}
// Retrieve the new value and make sure it's equal to what was appended
newValue, err := db.RetrieveFromStore(store, location2)
if err != nil {
t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+
"unexpectedly failed: %s", err)
}
if !bytes.Equal(newValue, value2) {
t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+
"returned unexpected value. Want: %s, got: %s",
string(value2), string(newValue))
}
}

85
database/keys.go Normal file
View File

@ -0,0 +1,85 @@
package database
import (
"bytes"
"encoding/hex"
)
var bucketSeparator = []byte("/")
// Key is a helper type meant to combine prefix
// and suffix into a single database key.
type Key struct {
bucket *Bucket
suffix []byte
}
// Bytes returns the full key bytes, consisting of
// the bucket path concatenated with the suffix.
func (k *Key) Bytes() []byte {
bucketPath := k.bucket.Path()
keyBytes := make([]byte, len(bucketPath)+len(k.suffix))
copy(keyBytes, bucketPath)
copy(keyBytes[len(bucketPath):], k.suffix)
return keyBytes
}
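// String returns a hex-encoded string representation of the key.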
func (k *Key) String() string {
return hex.EncodeToString(k.Bytes())
}
// Bucket returns the key bucket.
func (k *Key) Bucket() *Bucket {
return k.bucket
}
// Suffix returns the key suffix.
func (k *Key) Suffix() []byte {
return k.suffix
}
// newKey returns a new key composed
// of the given bucket and suffix.
func newKey(bucket *Bucket, suffix []byte) *Key {
return &Key{bucket: bucket, suffix: suffix}
}
// Bucket is a helper type meant to combine buckets
// and sub-buckets into a path that can be used to create
// database keys and prefix-based cursors.
type Bucket struct {
path [][]byte
}
// MakeBucket creates a new Bucket using the given path
// of buckets.
func MakeBucket(path ...[]byte) *Bucket {
return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
newPath := make([][]byte, len(b.path)+1)
copy(newPath, b.path)
copy(newPath[len(b.path):], [][]byte{bucketBytes})
return MakeBucket(newPath...)
}
// Key returns a key in the current bucket with the
// given suffix.
func (b *Bucket) Key(suffix []byte) *Key {
return newKey(b, suffix)
}
// Path returns the full path of the current bucket.
func (b *Bucket) Path() []byte {
bucketPath := bytes.Join(b.path, bucketSeparator)
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(bucketSeparator))
copy(bucketPathWithFinalSeparator, bucketPath)
copy(bucketPathWithFinalSeparator[len(bucketPath):], bucketSeparator)
return bucketPathWithFinalSeparator
}
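A minimal usage sketch of how these helpers compose; the bucket and key names below are hypothetical, not taken from the codebase:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/database"
)

func main() {
	// Build a nested bucket path. Path() joins the components with the
	// separator and appends a trailing separator: "utxo/diffs/".
	bucket := database.MakeBucket([]byte("utxo")).Bucket([]byte("diffs"))

	// Derive a typed key inside that bucket.
	key := bucket.Key([]byte("some-hash"))

	// Bytes() is the bucket path followed by the suffix: "utxo/diffs/some-hash".
	fmt.Printf("%s\n", key.Bytes())

	// String() hex-encodes the same bytes, which keeps log output unambiguous.
	fmt.Println(key.String())
}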

View File

@ -1,6 +1,7 @@
package database
import (
"bytes"
"reflect"
"testing"
)
@ -45,17 +46,26 @@ func TestBucketKey(t *testing.T) {
tests := []struct {
bucketByteSlices [][]byte
key []byte
expectedKey []byte
expectedKeyBytes []byte
expectedKey *Key
}{
{
bucketByteSlices: [][]byte{[]byte("hello")},
key: []byte("test"),
expectedKey: []byte("hello/test"),
expectedKeyBytes: []byte("hello/test"),
expectedKey: &Key{
bucket: MakeBucket([]byte("hello")),
suffix: []byte("test"),
},
},
{
bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")},
key: []byte("test"),
expectedKey: []byte("hello/world/test"),
expectedKeyBytes: []byte("hello/world/test"),
expectedKey: &Key{
bucket: MakeBucket([]byte("hello"), []byte("world")),
suffix: []byte("test"),
},
},
}
@ -63,7 +73,11 @@ func TestBucketKey(t *testing.T) {
resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key)
if !reflect.DeepEqual(resultKey, test.expectedKey) {
t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s",
string(test.expectedKey), string(resultKey))
test.expectedKeyBytes, resultKey)
}
if !bytes.Equal(resultKey.Bytes(), test.expectedKeyBytes) {
t.Errorf("TestBucketKey: got wrong key bytes. Want: %s, got: %s",
test.expectedKeyBytes, resultKey.Bytes())
}
}
}

View File

@ -0,0 +1,549 @@
// All tests within this file should call testForAllDatabaseTypes
// with the actual test. This makes sure that all supported
// database types adhere to the assumptions defined in the
// interfaces in this package.
package database_test
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"strings"
"testing"
)
func TestTransactionPut(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionPut", testTransactionPut)
}
func testTransactionPut(t *testing.T, db database.Database, testName string) {
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Put value1 into the transaction
key := database.MakeBucket().Key([]byte("key"))
value1 := []byte("value1")
err = dbTx.Put(key, value1)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Put value2 into the transaction with the same key
value2 := []byte("value2")
err = dbTx.Put(key, value2)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Commit the transaction
err = dbTx.Commit()
if err != nil {
t.Fatalf("%s: Commit "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value is value2
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value2) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value2), string(returnedValue))
}
}
func TestTransactionGet(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionGet", testTransactionGet)
}
func testTransactionGet(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Get the value back and make sure it's the same one
returnedValue, err := dbTx.Get(key1)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value1) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value1), string(returnedValue))
}
// Try getting a non-existent value and make sure
// the returned error is ErrNotFound
_, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error: %s", testName, err)
}
// Put a new value into the database outside of the transaction
key2 := database.MakeBucket().Key([]byte("key2"))
value2 := []byte("value2")
err = db.Put(key2, value2)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the new value doesn't exist inside the transaction
_, err = dbTx.Get(key2)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error: %s", testName, err)
}
// Put a new value into the transaction
key3 := database.MakeBucket().Key([]byte("key3"))
value3 := []byte("value3")
err = dbTx.Put(key3, value3)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the new value doesn't exist outside the transaction
_, err = db.Get(key3)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error: %s", testName, err)
}
}
func TestTransactionHas(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionHas", testTransactionHas)
}
func testTransactionHas(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Make sure that Has returns true for the value we just put
exists, err := dbTx.Has(key1)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if !exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value does not exist", testName)
}
// Make sure that Has returns false for a non-existent value
exists, err = dbTx.Has(database.MakeBucket().Key([]byte("doesn't exist")))
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
// Put a new value into the database outside of the transaction
key2 := database.MakeBucket().Key([]byte("key2"))
value2 := []byte("value2")
err = db.Put(key2, value2)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the new value doesn't exist inside the transaction
exists, err = dbTx.Has(key2)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
}
func TestTransactionDelete(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionDelete", testTransactionDelete)
}
func testTransactionDelete(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Begin two new transactions
dbTx1, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
dbTx2, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx1.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
err = dbTx2.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Delete the value in the first transaction
err = dbTx1.Delete(key)
if err != nil {
t.Fatalf("%s: Delete "+
"unexpectedly failed: %s", testName, err)
}
// Commit the first transaction
err = dbTx1.Commit()
if err != nil {
t.Fatalf("%s: Commit "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that Has returns false for the deleted value
exists, err := db.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
// Make sure that the second transaction was not affected
exists, err = dbTx2.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if !exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value does not exist", testName)
}
}
func TestTransactionAppendToStoreAndRetrieveFromStore(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionAppendToStoreAndRetrieveFromStore", testTransactionAppendToStoreAndRetrieveFromStore)
}
func testTransactionAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) {
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Append some data into the store
storeName := "store"
data := []byte("data")
location, err := dbTx.AppendToStore(storeName, data)
if err != nil {
t.Fatalf("%s: AppendToStore "+
"unexpectedly failed: %s", testName, err)
}
// Retrieve the data and make sure it's equal to what was appended
retrievedData, err := dbTx.RetrieveFromStore(storeName, location)
if err != nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(retrievedData, data) {
t.Fatalf("%s: RetrieveFromStore "+
"returned unexpected data. Want: %s, got: %s",
testName, string(data), string(retrievedData))
}
// Make sure that an invalid location returns ErrNotFound
fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
_, err = dbTx.RetrieveFromStore(storeName, fakeLocation)
if err == nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: RetrieveFromStore "+
"returned wrong error: %s", testName, err)
}
}
func TestTransactionCommit(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionCommit", testTransactionCommit)
}
func testTransactionCommit(t *testing.T, db database.Database, testName string) {
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Commit the transaction
err = dbTx.Commit()
if err != nil {
t.Fatalf("%s: Commit "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value exists and is as expected
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value), string(returnedValue))
}
// Make sure that further operations on the transaction return an error
_, err = dbTx.Get(key)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
expectedError := "closed transaction"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("%s: Get "+
"returned wrong error. Want: %s, got: %s",
testName, expectedError, err)
}
}
func TestTransactionRollback(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionRollback", testTransactionRollback)
}
func testTransactionRollback(t *testing.T, db database.Database, testName string) {
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Rollback the transaction
err = dbTx.Rollback()
if err != nil {
t.Fatalf("%s: Rollback "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value did not get added to the database
_, err = db.Get(key)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error", testName)
}
// Make sure that further operations on the transaction return an error
_, err = dbTx.Get(key)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
expectedError := "closed transaction"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("%s: Get "+
"returned wrong error. Want: %s, got: %s",
testName, expectedError, err)
}
}
func TestTransactionRollbackUnlessClosed(t *testing.T) {
testForAllDatabaseTypes(t, "TestTransactionRollbackUnlessClosed", testTransactionRollbackUnlessClosed)
}
func testTransactionRollbackUnlessClosed(t *testing.T, db database.Database, testName string) {
// Begin a new transaction
dbTx, err := db.Begin()
if err != nil {
t.Fatalf("%s: Begin "+
"unexpectedly failed: %s", testName, err)
}
defer func() {
err := dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// RollbackUnlessClosed the transaction
err = dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value did not get added to the database
_, err = db.Get(key)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error", testName)
}
// Make sure that further operations on the transaction return an error
_, err = dbTx.Get(key)
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
expectedError := "closed transaction"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("%s: Get "+
"returned wrong error. Want: %s, got: %s",
testName, expectedError, err)
}
// Make sure that further calls to RollbackUnlessClosed don't return an error
err = dbTx.RollbackUnlessClosed()
if err != nil {
t.Fatalf("%s: RollbackUnlessClosed "+
"unexpectedly failed: %s", testName, err)
}
}
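The testForAllDatabaseTypes helper itself is not part of this diff. A hedged sketch of what it plausibly looks like, given how it is called above and how prepareDatabaseForTest is used in the other test files:

// Sketch only: the real helper is not shown in this diff. Assumed
// behavior: run testFunc against every supported database type.
func testForAllDatabaseTypes(t *testing.T, testName string,
	testFunc func(t *testing.T, db database.Database, testName string)) {

	// With a single supported type this reduces to one run; additional
	// database types would add further prepare/run pairs here.
	db, teardownFunc := prepareDatabaseForTest(t, testName)
	defer teardownFunc()
	testFunc(t, db, testName)
}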

View File

@ -10,7 +10,7 @@ var (
acceptanceIndexBucket = database.MakeBucket([]byte("acceptance-index"))
)
func acceptanceIndexKey(hash *daghash.Hash) []byte {
func acceptanceIndexKey(hash *daghash.Hash) *database.Key {
return acceptanceIndexBucket.Key(hash[:])
}

View File

@ -14,7 +14,7 @@ var (
blockLocationsBucket = database.MakeBucket([]byte("block-locations"))
)
func blockLocationKey(hash *daghash.Hash) []byte {
func blockLocationKey(hash *daghash.Hash) *database.Key {
return blockLocationsBucket.Key(hash[:])
}

View File

@ -30,7 +30,7 @@ func BlockIndexCursor(context Context) (database.Cursor, error) {
return nil, err
}
return accessor.Cursor(blockIndexBucket.Path())
return accessor.Cursor(blockIndexBucket)
}
// BlockIndexCursorFrom opens a cursor over blocks-index blocks

View File

@ -11,8 +11,8 @@ func clearBucket(dbTx *TxContext, bucket *database.Bucket) error {
// Collect all of the keys before deleting them. We do this
// so as not to modify the cursor while we're still iterating
// over it.
keys := make([][]byte, 0)
cursor, err := accessor.Cursor(bucket.Path())
keys := make([]*database.Key, 0)
cursor, err := accessor.Cursor(bucket)
if err != nil {
return err
}
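The remainder of clearBucket is truncated in this diff. A hedged sketch of how the collect-then-delete loop plausibly continues; the Cursor method names (Next, Key, Close) and Delete on the accessor are assumptions, since those interfaces are not shown here:

// Sketch: gather every key under the bucket, then delete outside
// the iteration. Method names below are assumed.
defer cursor.Close()
for cursor.Next() {
	key, err := cursor.Key()
	if err != nil {
		return err
	}
	keys = append(keys, key)
}
for _, key := range keys {
	err := accessor.Delete(key)
	if err != nil {
		return err
	}
}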

View File

@ -1,7 +1,9 @@
package dbaccess
import "github.com/kaspanet/kaspad/database"
var (
dagStateKey = []byte("dag-state")
dagStateKey = database.MakeBucket().Key([]byte("dag-state"))
)
// StoreDAGState stores the DAG state in the database.

View File

@ -8,7 +8,7 @@ import (
var feeBucket = database.MakeBucket([]byte("fees"))
func feeDataKey(hash *daghash.Hash) []byte {
func feeDataKey(hash *daghash.Hash) *database.Key {
return feeBucket.Key(hash[:])
}

View File

@ -7,7 +7,7 @@ import (
var multisetBucket = database.MakeBucket([]byte("multiset"))
func multisetKey(hash *daghash.Hash) []byte {
func multisetKey(hash *daghash.Hash) *database.Key {
return multisetBucket.Key(hash[:])
}
@ -19,7 +19,7 @@ func MultisetCursor(context Context) (database.Cursor, error) {
return nil, err
}
return accessor.Cursor(multisetBucket.Path())
return accessor.Cursor(multisetBucket)
}
// StoreMultiset stores the multiset of a block by its hash.

26
dbaccess/peers.go Normal file
View File

@ -0,0 +1,26 @@
package dbaccess
import "github.com/kaspanet/kaspad/database"
var (
peersKey = database.MakeBucket().Key([]byte("peers"))
)
// StorePeersState stores the peers state in the database.
func StorePeersState(context Context, peersState []byte) error {
accessor, err := context.accessor()
if err != nil {
return err
}
return accessor.Put(peersKey, peersState)
}
// FetchPeersState retrieves the peers state from the database.
// Returns ErrNotFound if the state is missing from the database.
func FetchPeersState(context Context) ([]byte, error) {
accessor, err := context.accessor()
if err != nil {
return nil, err
}
return accessor.Get(peersKey)
}
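A hedged sketch of a caller round-tripping the peers state through these two functions. The gob encoding, the dbaccess.NoTx() context accessor, and the state type are all assumptions; none of them appear in this diff:

package main

import (
	"bytes"
	"encoding/gob"

	"github.com/kaspanet/kaspad/dbaccess"
)

// examplePeersState is a hypothetical stand-in for the real
// serialized peers-state model.
type examplePeersState struct {
	Version int
}

func storeAndReloadPeersState(state *examplePeersState) (*examplePeersState, error) {
	// Serialize with gob; the encoding actually used by callers is an
	// assumption, as is the dbaccess.NoTx() context accessor.
	var buffer bytes.Buffer
	if err := gob.NewEncoder(&buffer).Encode(state); err != nil {
		return nil, err
	}
	if err := dbaccess.StorePeersState(dbaccess.NoTx(), buffer.Bytes()); err != nil {
		return nil, err
	}
	serialized, err := dbaccess.FetchPeersState(dbaccess.NoTx())
	if err != nil {
		return nil, err
	}
	reloaded := &examplePeersState{}
	err = gob.NewDecoder(bytes.NewReader(serialized)).Decode(reloaded)
	return reloaded, err
}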

View File

@ -6,8 +6,9 @@ import (
)
var reachabilityDataBucket = database.MakeBucket([]byte("reachability"))
var reachabilityReindexKey = database.MakeBucket().Key([]byte("reachability-reindex-root"))
func reachabilityKey(hash *daghash.Hash) []byte {
func reachabilityKey(hash *daghash.Hash) *database.Key {
return reachabilityDataBucket.Key(hash[:])
}
@ -19,7 +20,7 @@ func ReachabilityDataCursor(context Context) (database.Cursor, error) {
return nil, err
}
return accessor.Cursor(reachabilityDataBucket.Path())
return accessor.Cursor(reachabilityDataBucket)
}
// StoreReachabilityData stores the reachability data of a block by its hash.
@ -38,3 +39,26 @@ func StoreReachabilityData(context Context, blockHash *daghash.Hash, reachabilit
func ClearReachabilityData(dbTx *TxContext) error {
return clearBucket(dbTx, reachabilityDataBucket)
}
// StoreReachabilityReindexRoot stores the reachability reindex root in the database.
func StoreReachabilityReindexRoot(context Context, reachabilityReindexRoot *daghash.Hash) error {
accessor, err := context.accessor()
if err != nil {
return err
}
return accessor.Put(reachabilityReindexKey, reachabilityReindexRoot[:])
}
// FetchReachabilityReindexRoot retrieves the reachability reindex root from the database.
// Returns ErrNotFound if the reindex root is missing from the database.
func FetchReachabilityReindexRoot(context Context) (*daghash.Hash, error) {
accessor, err := context.accessor()
if err != nil {
return nil, err
}
bytes, err := accessor.Get(reachabilityReindexKey)
if err != nil {
return nil, err
}
return daghash.NewHash(bytes)
}
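Usage of the reindex-root pair is symmetric; a brief sketch under the same NoTx() assumption as the peers-state example above:

// Sketch: persist the reindex root and read it back as a daghash.Hash.
if err := dbaccess.StoreReachabilityReindexRoot(dbaccess.NoTx(), reindexRoot); err != nil {
	return err
}
fetchedRoot, err := dbaccess.FetchReachabilityReindexRoot(dbaccess.NoTx())
if err != nil {
	return err
}
// fetchedRoot is now equal to reindexRoot.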

View File

@ -7,7 +7,7 @@ import (
var subnetworkBucket = database.MakeBucket([]byte("subnetworks"))
func subnetworkKey(subnetworkID *subnetworkid.SubnetworkID) []byte {
func subnetworkKey(subnetworkID *subnetworkid.SubnetworkID) *database.Key {
return subnetworkBucket.Key(subnetworkID[:])
}

View File

@ -8,7 +8,7 @@ var (
utxoBucket = database.MakeBucket([]byte("utxo"))
)
func utxoKey(outpointKey []byte) []byte {
func utxoKey(outpointKey []byte) *database.Key {
return utxoBucket.Key(outpointKey)
}
@ -44,5 +44,5 @@ func UTXOSetCursor(context Context) (database.Cursor, error) {
return nil, err
}
return accessor.Cursor(utxoBucket.Path())
return accessor.Cursor(utxoBucket)
}

View File

@ -8,7 +8,7 @@ import (
var utxoDiffsBucket = database.MakeBucket([]byte("utxo-diffs"))
func utxoDiffKey(hash *daghash.Hash) []byte {
func utxoDiffKey(hash *daghash.Hash) *database.Key {
return utxoDiffsBucket.Key(hash[:])
}

2
go.mod
View File

@ -14,7 +14,7 @@ require (
github.com/kaspanet/go-secp256k1 v0.0.2
github.com/kr/pretty v0.1.0 // indirect
github.com/pkg/errors v0.9.1
github.com/syndtr/goleveldb v1.0.0
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect
golang.org/x/text v0.3.2 // indirect

2
go.sum
View File

@ -38,6 +38,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

View File

@ -6,14 +6,15 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"runtime/pprof"
"strings"
"time"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/config"
@ -137,15 +138,27 @@ func kaspadMain(serverChan chan<- *server.Server) error {
server, err := server.NewServer(cfg.Listeners, config.ActiveConfig().NetParams(),
interrupt)
if err != nil {
// TODO: this logging could do with some beautifying.
kasdLog.Errorf("Unable to start server on %s: %s",
kasdLog.Errorf("Unable to start server on %s: %+v",
strings.Join(cfg.Listeners, ", "), err)
return err
}
defer func() {
kasdLog.Infof("Gracefully shutting down the server...")
server.Stop()
server.WaitForShutdown()
shutdownDone := make(chan struct{})
go func() {
server.WaitForShutdown()
shutdownDone <- struct{}{}
}()
const shutdownTimeout = 2 * time.Minute
select {
case <-shutdownDone:
case <-time.After(shutdownTimeout):
kasdLog.Criticalf("Graceful shutdown timed out %s. Terminating...", shutdownTimeout)
}
srvrLog.Infof("Server shutdown complete")
}()
server.Start()
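The bounded wait above is a general Go pattern: run the blocking call in a goroutine and select between its completion channel and a timer. A self-contained sketch of the same idea; waitForShutdown is a hypothetical stand-in for the blocking call:

package main

import "time"

func waitForShutdown() {} // hypothetical blocking call

func main() {
	done := make(chan struct{})
	go func() {
		waitForShutdown()
		close(done)
	}()
	select {
	case <-done:
		// The wait finished in time.
	case <-time.After(2 * time.Minute):
		// Give up waiting and terminate anyway.
	}
}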
@ -251,12 +264,6 @@ func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Block and transaction processing can cause bursty allocations. This
// limits the garbage collector from excessively overallocating during
// bursts. This value was arrived at with the help of profiling live
// usage.
debug.SetGCPercent(10)
// Up some limits.
if err := limits.SetLimits(); err != nil {
fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err)

View File

@ -113,7 +113,7 @@ var subsystemLoggers = map[string]*logs.Logger{
// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
err := BackendLog.AddLogFileWithCustomRotator(logFile, logs.LevelTrace, 100*1024, 4)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
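InitLog now pins a 100 MB rotation threshold (the argument is in kilobytes) and 4 rolled files. A hedged sketch of choosing different settings through the same API; the path and values below are arbitrary:

// Keep up to 8 rolled files of at most 50 MB each for the debug level.
err := BackendLog.AddLogFileWithCustomRotator(
	"logs/debug.log", logs.LevelDebug, 50*1024, 8)
if err != nil {
	fmt.Fprintf(os.Stderr, "Error adding log file: %s", err)
	os.Exit(1)
}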

View File

@ -35,7 +35,6 @@ package logs
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
@ -44,6 +43,8 @@ import (
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/jrick/logrotate/rotator"
)
@ -265,15 +266,27 @@ func callsite(flag uint32) (string, int) {
return file, line
}
const (
defaultThresholdKB = 10 * 1024
defaultMaxRolls = 3
)
// AddLogFile adds a file which the log will write into on a certain
// log level. It'll create the file if it doesn't exist.
// log level with the default log rotation settings. It'll create the file if it doesn't exist.
func (b *Backend) AddLogFile(logFile string, logLevel Level) error {
return b.AddLogFileWithCustomRotator(logFile, logLevel, defaultThresholdKB, defaultMaxRolls)
}
// AddLogFileWithCustomRotator adds a file which the log will write into on a certain
// log level, with the specified log rotation settings.
// It'll create the file if it doesn't exist.
func (b *Backend) AddLogFileWithCustomRotator(logFile string, logLevel Level, thresholdKB int64, maxRolls int) error {
logDir, _ := filepath.Split(logFile)
err := os.MkdirAll(logDir, 0700)
if err != nil {
return errors.Errorf("failed to create log directory: %s", err)
}
r, err := rotator.New(logFile, 10*1024, false, 3)
r, err := rotator.New(logFile, thresholdKB, false, maxRolls)
if err != nil {
return errors.Errorf("failed to create file rotator: %s", err)
}
@ -368,108 +381,90 @@ type Logger struct {
// Trace formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelTrace.
func (l *Logger) Trace(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelTrace {
l.b.print(LevelTrace, l.tag, args...)
}
l.Write(LevelTrace, args...)
}
// Tracef formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelTrace.
func (l *Logger) Tracef(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelTrace {
l.b.printf(LevelTrace, l.tag, format, args...)
}
l.Writef(LevelTrace, format, args...)
}
// Debug formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelDebug.
func (l *Logger) Debug(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelDebug {
l.b.print(LevelDebug, l.tag, args...)
}
l.Write(LevelDebug, args...)
}
// Debugf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelDebug.
func (l *Logger) Debugf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelDebug {
l.b.printf(LevelDebug, l.tag, format, args...)
}
l.Writef(LevelDebug, format, args...)
}
// Info formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelInfo.
func (l *Logger) Info(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelInfo {
l.b.print(LevelInfo, l.tag, args...)
}
l.Write(LevelInfo, args...)
}
// Infof formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelInfo.
func (l *Logger) Infof(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelInfo {
l.b.printf(LevelInfo, l.tag, format, args...)
}
l.Writef(LevelInfo, format, args...)
}
// Warn formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelWarn.
func (l *Logger) Warn(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelWarn {
l.b.print(LevelWarn, l.tag, args...)
}
l.Write(LevelWarn, args...)
}
// Warnf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelWarn.
func (l *Logger) Warnf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelWarn {
l.b.printf(LevelWarn, l.tag, format, args...)
}
l.Writef(LevelWarn, format, args...)
}
// Error formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelError.
func (l *Logger) Error(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelError {
l.b.print(LevelError, l.tag, args...)
}
l.Write(LevelError, args...)
}
// Errorf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelError.
func (l *Logger) Errorf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelError {
l.b.printf(LevelError, l.tag, format, args...)
}
l.Writef(LevelError, format, args...)
}
// Critical formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelCritical.
func (l *Logger) Critical(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelCritical {
l.b.print(LevelCritical, l.tag, args...)
}
l.Write(LevelCritical, args...)
}
// Criticalf formats message according to format specifier, prepends the prefix
// as necessary, and writes to log with LevelCritical.
func (l *Logger) Criticalf(format string, args ...interface{}) {
l.Writef(LevelCritical, format, args...)
}
// Write formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with the given logLevel.
func (l *Logger) Write(logLevel Level, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelCritical {
l.b.printf(LevelCritical, l.tag, format, args...)
if lvl <= logLevel {
l.b.print(logLevel, l.tag, args...)
}
}
// Writef formats message according to format specifier, prepends the prefix
// as necessary, and writes to log with the given logLevel.
func (l *Logger) Writef(logLevel Level, format string, args ...interface{}) {
lvl := l.Level()
if lvl <= logLevel {
l.b.printf(logLevel, l.tag, format, args...)
}
}
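After this refactor every leveled method is a one-line wrapper over Write/Writef, and a caller can also select the level at runtime. A small sketch; the logger value and the variables are hypothetical:

// Sketch: pick the level dynamically instead of via a fixed wrapper.
level := logs.LevelDebug
if verbose {
	level = logs.LevelTrace
}
log.Writef(level, "processed %d blocks in %s", blockCount, elapsed)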

Some files were not shown because too many files have changed in this diff.