From bfbc72724db23b3c927f4afd829200b310936df0 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 5 Apr 2020 11:46:16 +0300 Subject: [PATCH 01/77] [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) --- blockdag/dagio.go | 35 ++++++++++----------------- util/buffers/sub_buffer.go | 48 -------------------------------------- 2 files changed, 12 insertions(+), 71 deletions(-) delete mode 100644 util/buffers/sub_buffer.go diff --git a/blockdag/dagio.go b/blockdag/dagio.go index 31b4f12b5..854a3a21b 100644 --- a/blockdag/dagio.go +++ b/blockdag/dagio.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "sync" "github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/dbaccess" @@ -18,7 +17,6 @@ import ( "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/binaryserializer" - "github.com/kaspanet/kaspad/util/buffers" "github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/wire" @@ -46,14 +44,6 @@ func isNotInDAGErr(err error) bool { return errors.As(err, ¬InDAGErr) } -// outpointKeyPool defines a concurrent safe free list of byte buffers used to -// provide temporary buffers for outpoint database keys. -var outpointKeyPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} // Pointer to a buffer to avoid boxing alloc. - }, -} - // outpointIndexByteOrder is the byte order for serializing the outpoint index. // It uses big endian to ensure that when outpoint is used as database key, the // keys will be iterated in an ascending order by the outpoint index. @@ -91,42 +81,41 @@ func deserializeOutpoint(r io.Reader) (*wire.Outpoint, error) { // updateUTXOSet updates the UTXO set in the database based on the provided // UTXO diff. func updateUTXOSet(dbContext dbaccess.Context, virtualUTXODiff *UTXODiff) error { + outpointBuff := bytes.NewBuffer(make([]byte, outpointSerializeSize)) for outpoint := range virtualUTXODiff.toRemove { - w := outpointKeyPool.Get().(*bytes.Buffer) - w.Reset() - err := serializeOutpoint(w, &outpoint) + outpointBuff.Reset() + err := serializeOutpoint(outpointBuff, &outpoint) if err != nil { return err } - key := w.Bytes() + key := outpointBuff.Bytes() err = dbaccess.RemoveFromUTXOSet(dbContext, key) if err != nil { return err } - outpointKeyPool.Put(w) } // We are preallocating for P2PKH entries because they are the most common ones. // If we have entries with a compressed script bigger than P2PKH's, the buffer will grow. - bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(virtualUTXODiff.toAdd) - buff := bytes.NewBuffer(make([]byte, bytesToPreallocate)) + utxoEntryBuff := bytes.NewBuffer(make([]byte, p2pkhUTXOEntrySerializeSize)) + for outpoint, entry := range virtualUTXODiff.toAdd { + utxoEntryBuff.Reset() + outpointBuff.Reset() // Serialize and store the UTXO entry. 
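// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch. The
// hunk above drops the sync.Pool of buffers in favor of two long-lived
// bytes.Buffer values that are Reset() before each reuse: Reset
// truncates the length but keeps the backing array, so steady-state
// iterations allocate nothing. The general pattern, where serialize,
// store, items, and expectedSize are hypothetical stand-ins:
buff := bytes.NewBuffer(make([]byte, 0, expectedSize))
for _, item := range items {
	buff.Reset() // drop contents, keep capacity
	if err := serialize(buff, item); err != nil {
		return err
	}
	// buff.Bytes() is only valid until the next Reset, so it must be
	// consumed (or copied) before the following iteration.
	if err := store(buff.Bytes()); err != nil {
		return err
	}
}
// ----------------------------------------------------------------------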
- sBuff := buffers.NewSubBuffer(buff) - err := serializeUTXOEntry(sBuff, entry) + err := serializeUTXOEntry(utxoEntryBuff, entry) if err != nil { return err } - serializedEntry := sBuff.Bytes() + serializedEntry := utxoEntryBuff.Bytes() - sBuff = buffers.NewSubBuffer(buff) - err = serializeOutpoint(sBuff, &outpoint) + err = serializeOutpoint(outpointBuff, &outpoint) if err != nil { return err } - key := sBuff.Bytes() + key := outpointBuff.Bytes() err = dbaccess.AddToUTXOSet(dbContext, key, serializedEntry) if err != nil { return err diff --git a/util/buffers/sub_buffer.go b/util/buffers/sub_buffer.go deleted file mode 100644 index 8f29ca8b4..000000000 --- a/util/buffers/sub_buffer.go +++ /dev/null @@ -1,48 +0,0 @@ -package buffers - -import ( - "bytes" - "github.com/pkg/errors" -) - -// SubBuffer lets you write to an existing buffer -// and let you check with the `Bytes()` method what -// has been written to the underlying buffer using -// the sub buffer. -type SubBuffer struct { - buff *bytes.Buffer - start, end int -} - -// Bytes returns all the bytes that were written to the sub buffer. -func (s *SubBuffer) Bytes() []byte { - return s.buff.Bytes()[s.start:s.end] -} - -// Write writes to the sub buffer's underlying buffer -// and increases s.end by the number of bytes written -// so s.Bytes() will be able to return the written bytes. -func (s *SubBuffer) Write(p []byte) (int, error) { - if s.buff.Len() > s.end || s.buff.Len() < s.start { - return 0, errors.New("a sub buffer cannot be written after another entity wrote or read from its " + - "underlying buffer") - } - - n, err := s.buff.Write(p) - if err != nil { - return 0, err - } - - s.end += n - - return n, nil -} - -// NewSubBuffer returns a new sub buffer. -func NewSubBuffer(buff *bytes.Buffer) *SubBuffer { - return &SubBuffer{ - buff: buff, - start: buff.Len(), - end: buff.Len(), - } -} From 6da3606721811c0068de364e9d844521a328c6c7 Mon Sep 17 00:00:00 2001 From: Mike Zak Date: Sun, 5 Apr 2020 16:23:01 +0300 Subject: [PATCH 02/77] Update to version 0.4.0 --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 2f4efd536..ee8407794 100644 --- a/version/version.go +++ b/version/version.go @@ -10,7 +10,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs const ( appMajor uint = 0 - appMinor uint = 3 + appMinor uint = 4 appPatch uint = 0 ) From a31139d4a579291b88cb2fa0b4f4c4df86b84f3e Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 6 Apr 2020 11:08:57 +0300 Subject: [PATCH 03/77] [NOD-895] Break down initDAGState to sub-routines (#690) --- blockdag/dagio.go | 227 +++++++++++++++++++++++++++------------------- 1 file changed, 134 insertions(+), 93 deletions(-) diff --git a/blockdag/dagio.go b/blockdag/dagio.go index 854a3a21b..71abb7f85 100644 --- a/blockdag/dagio.go +++ b/blockdag/dagio.go @@ -190,96 +190,23 @@ func (dag *BlockDAG) initDAGState() error { if err != nil { return err } - if !dagState.LocalSubnetworkID.IsEqual(dag.subnetworkID) { - return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+ - " its database is already built with subnetwork ID %s. 
If you"+ - " want to switch to a new database, please reset the"+ - " database by starting kaspad with --reset-db flag", dag.subnetworkID, dagState.LocalSubnetworkID) + + err = dag.validateLocalSubnetworkID(dagState) + if err != nil { + return err } log.Debugf("Loading block index...") - var unprocessedBlockNodes []*blockNode - blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx()) + unprocessedBlockNodes, err := dag.initBlockIndex() if err != nil { return err } - defer blockIndexCursor.Close() - for blockIndexCursor.Next() { - serializedDBNode, err := blockIndexCursor.Value() - if err != nil { - return err - } - node, err := dag.deserializeBlockNode(serializedDBNode) - if err != nil { - return err - } - - // Check to see if this node had been stored in the the block DB - // but not yet accepted. If so, add it to a slice to be processed later. - if node.status == statusDataStored { - unprocessedBlockNodes = append(unprocessedBlockNodes, node) - continue - } - - // If the node is known to be invalid add it as-is to the block - // index and continue. - if node.status.KnownInvalid() { - dag.index.addNode(node) - continue - } - - if dag.blockCount == 0 { - if !node.hash.IsEqual(dag.dagParams.GenesisHash) { - return AssertError(fmt.Sprintf("initDAGState: Expected "+ - "first entry in block index to be genesis block, "+ - "found %s", node.hash)) - } - } else { - if len(node.parents) == 0 { - return AssertError(fmt.Sprintf("initDAGState: block %s "+ - "has no parents but it's not the genesis block", node.hash)) - } - } - - // Add the node to its parents children, connect it, - // and add it to the block index. - node.updateParentsChildren() - dag.index.addNode(node) - - dag.blockCount++ - } log.Debugf("Loading UTXO set...") - fullUTXOCollection := make(utxoCollection) - cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx()) + fullUTXOCollection, err := dag.initUTXOSet() if err != nil { return err } - defer cursor.Close() - - for cursor.Next() { - // Deserialize the outpoint - key, err := cursor.Key() - if err != nil { - return err - } - outpoint, err := deserializeOutpoint(bytes.NewReader(key)) - if err != nil { - return err - } - - // Deserialize the utxo entry - value, err := cursor.Value() - if err != nil { - return err - } - entry, err := deserializeUTXOEntry(bytes.NewReader(value)) - if err != nil { - return err - } - - fullUTXOCollection[*outpoint] = entry - } log.Debugf("Loading reachability data...") err = dag.reachabilityStore.init(dbaccess.NoTx()) @@ -300,32 +227,149 @@ func (dag *BlockDAG) initDAGState() error { } log.Debugf("Applying the stored tips to the virtual block...") - tips := newBlockSet() - for _, tipHash := range dagState.TipHashes { - tip := dag.index.LookupNode(tipHash) - if tip == nil { - return AssertError(fmt.Sprintf("initDAGState: cannot find "+ - "DAG tip %s in block index", dagState.TipHashes)) - } - tips.add(tip) + err = dag.initVirtualBlockTips(dagState) + if err != nil { + return err } - dag.virtual.SetTips(tips) log.Debugf("Setting the last finality point...") dag.lastFinalityPoint = dag.index.LookupNode(dagState.LastFinalityPoint) dag.finalizeNodesBelowFinalityPoint(false) log.Debugf("Processing unprocessed blockNodes...") + err = dag.processUnprocessedBlockNodes(unprocessedBlockNodes) + if err != nil { + return err + } + + log.Infof("DAG state initialized.") + + return nil +} + +func (dag *BlockDAG) validateLocalSubnetworkID(state *dagState) error { + if !state.LocalSubnetworkID.IsEqual(dag.subnetworkID) { + return errors.Errorf("Cannot start 
kaspad with subnetwork ID %s because"+ + " its database is already built with subnetwork ID %s. If you"+ + " want to switch to a new database, please reset the"+ + " database by starting kaspad with --reset-db flag", dag.subnetworkID, state.LocalSubnetworkID) + } + return nil +} + +func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err error) { + blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx()) + if err != nil { + return nil, err + } + defer blockIndexCursor.Close() + for blockIndexCursor.Next() { + serializedDBNode, err := blockIndexCursor.Value() + if err != nil { + return nil, err + } + node, err := dag.deserializeBlockNode(serializedDBNode) + if err != nil { + return nil, err + } + + // Check to see if this node had been stored in the the block DB + // but not yet accepted. If so, add it to a slice to be processed later. + if node.status == statusDataStored { + unprocessedBlockNodes = append(unprocessedBlockNodes, node) + continue + } + + // If the node is known to be invalid add it as-is to the block + // index and continue. + if node.status.KnownInvalid() { + dag.index.addNode(node) + continue + } + + if dag.blockCount == 0 { + if !node.hash.IsEqual(dag.dagParams.GenesisHash) { + return nil, AssertError(fmt.Sprintf("Expected "+ + "first entry in block index to be genesis block, "+ + "found %s", node.hash)) + } + } else { + if len(node.parents) == 0 { + return nil, AssertError(fmt.Sprintf("block %s "+ + "has no parents but it's not the genesis block", node.hash)) + } + } + + // Add the node to its parents children, connect it, + // and add it to the block index. + node.updateParentsChildren() + dag.index.addNode(node) + + dag.blockCount++ + } + return unprocessedBlockNodes, nil +} + +func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error) { + fullUTXOCollection = make(utxoCollection) + cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx()) + if err != nil { + return nil, err + } + defer cursor.Close() + + for cursor.Next() { + // Deserialize the outpoint + key, err := cursor.Key() + if err != nil { + return nil, err + } + outpoint, err := deserializeOutpoint(bytes.NewReader(key)) + if err != nil { + return nil, err + } + + // Deserialize the utxo entry + value, err := cursor.Value() + if err != nil { + return nil, err + } + entry, err := deserializeUTXOEntry(bytes.NewReader(value)) + if err != nil { + return nil, err + } + + fullUTXOCollection[*outpoint] = entry + } + + return fullUTXOCollection, nil +} + +func (dag *BlockDAG) initVirtualBlockTips(state *dagState) error { + tips := newBlockSet() + for _, tipHash := range state.TipHashes { + tip := dag.index.LookupNode(tipHash) + if tip == nil { + return AssertError(fmt.Sprintf("cannot find "+ + "DAG tip %s in block index", state.TipHashes)) + } + tips.add(tip) + } + dag.virtual.SetTips(tips) + return nil +} + +func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*blockNode) error { for _, node := range unprocessedBlockNodes { // Check to see if the block exists in the block DB. If it // doesn't, the database has certainly been corrupted. 
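// ----------------------------------------------------------------------
// Editor's aside — not part of the patch. After this refactor the body
// of initDAGState reduces to an orchestrator over the extracted
// sub-routines, each owning a single concern (error handling condensed):
//
//	err = dag.validateLocalSubnetworkID(dagState)
//	unprocessedBlockNodes, err := dag.initBlockIndex()
//	fullUTXOCollection, err := dag.initUTXOSet()
//	err = dag.initVirtualBlockTips(dagState)
//	err = dag.processUnprocessedBlockNodes(unprocessedBlockNodes)
// ----------------------------------------------------------------------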
blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash) if err != nil { - return AssertError(fmt.Sprintf("initDAGState: HasBlock "+ + return AssertError(fmt.Sprintf("HasBlock "+ "for block %s failed: %s", node.hash, err)) } if !blockExists { - return AssertError(fmt.Sprintf("initDAGState: block %s "+ + return AssertError(fmt.Sprintf("block %s "+ "exists in block index but not in block db", node.hash)) } @@ -354,9 +398,6 @@ func (dag *BlockDAG) initDAGState() error { "impossible.", node.hash)) } } - - log.Infof("DAG state initialized.") - return nil } From 3c4a80f16d490bacfc806db9a7e4db8b4f6c4ff4 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 6 Apr 2020 16:00:48 +0300 Subject: [PATCH 04/77] [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691) * [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace. * [NOD-899] Fix bad variable name. * [NOD-899] Reduce code duplication. --- database/ffldb/ff/write.go | 3 ++- util/panics/panics.go | 55 +++++++++++++++++++++++--------------- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/database/ffldb/ff/write.go b/database/ffldb/ff/write.go index b23ad8007..897fa2da2 100644 --- a/database/ffldb/ff/write.go +++ b/database/ffldb/ff/write.go @@ -1,6 +1,7 @@ package ff import ( + "github.com/kaspanet/kaspad/util/panics" "github.com/pkg/errors" "hash/crc32" "os" @@ -154,7 +155,7 @@ func (s *flatFileStore) writeData(data []byte, fieldName string) error { if err != nil { var pathErr *os.PathError if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC { - panic("No space left on the hard disk, exiting...") + panics.Exit(log, "No space left on the hard disk.") } return errors.Wrapf(err, "failed to write %s in store %s to file %d "+ "at offset %d", fieldName, s.storeName, cursor.currentFileNumber, diff --git a/util/panics/panics.go b/util/panics/panics.go index 4f04e2e75..36532bd27 100644 --- a/util/panics/panics.go +++ b/util/panics/panics.go @@ -9,33 +9,17 @@ import ( "github.com/kaspanet/kaspad/logs" ) -// HandlePanic recovers panics, log them, runs an optional panicHandler, -// and then initiates a clean shutdown. +const exitHandlerTimeout = 5 * time.Second + +// HandlePanic recovers panics and then initiates a clean shutdown. func HandlePanic(log *logs.Logger, goroutineStackTrace []byte) { err := recover() if err == nil { return } - panicHandlerDone := make(chan struct{}) - go func() { - log.Criticalf("Fatal error: %+v", err) - if goroutineStackTrace != nil { - log.Criticalf("Goroutine stack trace: %s", goroutineStackTrace) - } - log.Criticalf("Stack trace: %s", debug.Stack()) - log.Backend().Close() - close(panicHandlerDone) - }() - - const panicHandlerTimeout = 5 * time.Second - select { - case <-time.After(panicHandlerTimeout): - fmt.Fprintln(os.Stderr, "Couldn't handle a fatal error. Exiting...") - case <-panicHandlerDone: - } - log.Criticalf("Exiting") - os.Exit(1) + reason := fmt.Sprintf("Fatal error: %+v", err) + exit(log, reason, debug.Stack(), goroutineStackTrace) } // GoroutineWrapperFunc returns a goroutine wrapper function that handles panics and writes them to the log. @@ -59,3 +43,32 @@ func AfterFuncWrapperFunc(log *logs.Logger) func(d time.Duration, f func()) *tim }) } } + +// Exit prints the given reason to log and initiates a clean shutdown. 
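// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch. The
// write.go hunk above recognizes "no space left on device" by
// unwrapping the *os.PathError that file writes return and comparing
// its errno against syscall.ENOSPC, then shuts down cleanly via
// panics.Exit instead of panicking with a full stack trace. The
// detection in isolation (assuming "os", "syscall", and
// "github.com/pkg/errors" are imported):
func isOutOfDiskSpace(err error) bool {
	var pathErr *os.PathError
	return errors.As(err, &pathErr) && pathErr.Err == syscall.ENOSPC
}
// ----------------------------------------------------------------------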
+func Exit(log *logs.Logger, reason string) { + exit(log, reason, nil, nil) +} + +// Exit prints the given reason, prints either of the given stack traces (if not nil), +// waits for them to finish writing, and exits. +func exit(log *logs.Logger, reason string, currentThreadStackTrace []byte, goroutineStackTrace []byte) { + exitHandlerDone := make(chan struct{}) + go func() { + log.Criticalf("Exiting: %s", reason) + if goroutineStackTrace != nil { + log.Criticalf("Goroutine stack trace: %s", goroutineStackTrace) + } + if currentThreadStackTrace != nil { + log.Criticalf("Stack trace: %s", currentThreadStackTrace) + } + log.Backend().Close() + close(exitHandlerDone) + }() + + select { + case <-time.After(exitHandlerTimeout): + fmt.Fprintln(os.Stderr, "Couldn't exit gracefully.") + case <-exitHandlerDone: + } + os.Exit(1) +} From df934990d7bb70a319d245c6d145f8e289ed3046 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Tue, 7 Apr 2020 12:45:12 +0300 Subject: [PATCH 05/77] [NOD-822] Don't return rule errors from utxoset code (#693) * [NOD-822] Remove rule errors from the UTXO diff code * [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO * [NOD-822] Fix comment --- blockdag/dag.go | 17 +++++++++-------- blockdag/utxoset.go | 19 ++++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index 00dbc949a..ee117f3b7 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -1058,11 +1058,16 @@ func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error { return newVirtualUTXODiffSet.meldToBase() } -func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*UTXODiff, error) { +// applyAndVerifyBlockTransactionsToPastUTXO applies a block's transactions to its +// given past UTXO, and verifies that there are no double spends with its past. 
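// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch. The
// patch keeps consensus rule errors out of the generic UTXO-set code:
// utxoset.go returns a plain sentinel error (wrapped for context), and
// only this DAG-level boundary translates it into a rule error.
// errors.Is matches through errors.Wrapf, so the wrapping is lossless:
var errSentinel = errors.New("missing transaction output in the utxo set")

func lowLevel() error {
	// Wrapf preserves the sentinel in the error chain.
	return errors.Wrapf(errSentinel, "transaction %s is invalid", "some-txid")
}

func boundary() error {
	err := lowLevel()
	if errors.Is(err, errSentinel) {
		// ruleError and ErrMissingTxOut are the blockdag package's own helpers.
		return ruleError(ErrMissingTxOut, err.Error())
	}
	return err
}
// ----------------------------------------------------------------------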
+func applyAndVerifyBlockTransactionsToPastUTXO(pastUTXO UTXOSet, blockTransactions []*util.Tx) (UTXOSet, error) { diff := NewUTXODiff() - for _, tx := range transactions { + for _, tx := range blockTransactions { txDiff, err := pastUTXO.diffFromTx(tx.MsgTx(), UnacceptedBlueScore) + if errors.Is(err, errUTXOMissingTxOut) { + return nil, ruleError(ErrMissingTxOut, err.Error()) + } if err != nil { return nil, err } @@ -1072,7 +1077,7 @@ func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (* } } - return diff, nil + return pastUTXO.WithDiff(diff) } // verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO @@ -1096,11 +1101,7 @@ func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx return nil, nil, nil, nil, err } - diffFromTxs, err := node.diffFromTxs(pastUTXO, transactions) - if err != nil { - return nil, nil, nil, nil, err - } - utxo, err := pastUTXO.WithDiff(diffFromTxs) + utxo, err := applyAndVerifyBlockTransactionsToPastUTXO(pastUTXO, transactions) if err != nil { return nil, nil, nil, nil, err } diff --git a/blockdag/utxoset.go b/blockdag/utxoset.go index f56f6801d..eb5a134c2 100644 --- a/blockdag/utxoset.go +++ b/blockdag/utxoset.go @@ -293,8 +293,8 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error { } if d.toRemove.contains(outpoint) { // If already exists - this is an error - return ruleError(ErrWithDiff, fmt.Sprintf( - "withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)) + return errors.Errorf( + "withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint) } // If not exists neither in toAdd nor in toRemove - add to toRemove @@ -305,9 +305,9 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error { if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) { // If already exists in toRemove with the same blueScore - remove from toRemove if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) { - return ruleError(ErrWithDiff, fmt.Sprintf( + return errors.Errorf( "withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+ - "corresponding entry in diff.toRemove", outpoint)) + "corresponding entry in diff.toRemove", outpoint) } d.toRemove.remove(outpoint) continue @@ -316,8 +316,8 @@ func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error { (existingEntry.blockBlueScore == entryToAdd.blockBlueScore || !diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) { // If already exists - this is an error - return ruleError(ErrWithDiff, fmt.Sprintf( - "withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)) + return errors.Errorf( + "withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint) } // If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd @@ -406,6 +406,8 @@ type UTXOSet interface { Get(outpoint wire.Outpoint) (*UTXOEntry, bool) } +var errUTXOMissingTxOut = errors.New("missing transaction output in the utxo set") + // diffFromTx is a common implementation for diffFromTx, that works // for both diff-based and full UTXO sets // Returns a diff that is equivalent to provided transaction, @@ -421,9 +423,8 @@ func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff return nil, err } } else { - return nil, ruleError(ErrMissingTxOut, fmt.Sprintf( - "Transaction %s is invalid because spends outpoint %s that is not in utxo set", - tx.TxID(), txIn.PreviousOutpoint)) + 
return nil, errors.Wrapf(errUTXOMissingTxOut, "Transaction %s is invalid because it spends "+ + "outpoint %s that is not in utxo set", tx.TxID(), txIn.PreviousOutpoint) } } } From 7609c50641ec4527be9848a95839bf2b2a001807 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 8 Apr 2020 12:12:21 +0300 Subject: [PATCH 06/77] [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692) * [NOD-885] Create database.Key type * [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes() * [NOD-885] Make Key.String return a hex string * [NOD-885] Rename key parts * [NOD-885] Rename separator->bucketSeparator * [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix * [NOD-885] Change comments * [NOD-885] Change key prefix to bucket * [NOD-885] Don't use database.NewKey inside dbaccess * [NOD-885] Fix nil bug in Bucket.Path() * [NOD-885] Rename helpers.go -> keys.go * [NOD-885] Unexport database.NewKey * [NOD-885] Remove redundant code in Bucket.Path() --- blockdag/dagio.go | 4 +- blockdag/multisetstore.go | 2 +- blockdag/reachabilitystore.go | 4 +- database/bucket.go | 51 -------------- database/cursor.go | 4 +- database/dataaccessor.go | 10 +-- database/ffldb/ffldb.go | 12 ++-- database/ffldb/initialize.go | 2 +- database/ffldb/ldb/cursor.go | 15 ++-- database/ffldb/ldb/leveldb.go | 19 +++-- database/ffldb/ldb/leveldb_test.go | 6 +- database/ffldb/ldb/transaction.go | 23 +++--- database/ffldb/transaction.go | 10 +-- database/keys.go | 85 +++++++++++++++++++++++ database/{bucket_test.go => keys_test.go} | 22 ++++-- dbaccess/acceptanceindex.go | 2 +- dbaccess/block.go | 2 +- dbaccess/blockindex.go | 2 +- dbaccess/common.go | 4 +- dbaccess/dagstate.go | 4 +- dbaccess/fee_data.go | 2 +- dbaccess/multiset.go | 4 +- dbaccess/reachability.go | 4 +- dbaccess/subnetwork.go | 2 +- dbaccess/utxo.go | 4 +- dbaccess/utxodiff.go | 2 +- 26 files changed, 174 insertions(+), 127 deletions(-) delete mode 100644 database/bucket.go create mode 100644 database/keys.go rename database/{bucket_test.go => keys_test.go} (74%) diff --git a/blockdag/dagio.go b/blockdag/dagio.go index 71abb7f85..f72e9f9c3 100644 --- a/blockdag/dagio.go +++ b/blockdag/dagio.go @@ -324,7 +324,7 @@ func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error if err != nil { return nil, err } - outpoint, err := deserializeOutpoint(bytes.NewReader(key)) + outpoint, err := deserializeOutpoint(bytes.NewReader(key.Suffix())) if err != nil { return nil, err } @@ -639,7 +639,7 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha if err != nil { return nil, err } - blockHash, err := blockHashFromBlockIndexKey(key) + blockHash, err := blockHashFromBlockIndexKey(key.Suffix()) if err != nil { return nil, err } diff --git a/blockdag/multisetstore.go b/blockdag/multisetstore.go index 7c78109ea..8f9e67d82 100644 --- a/blockdag/multisetstore.go +++ b/blockdag/multisetstore.go @@ -96,7 +96,7 @@ func (store *multisetStore) init(dbContext dbaccess.Context) error { return err } - hash, err := daghash.NewHash(key) + hash, err := daghash.NewHash(key.Suffix()) if err != nil { return err } diff --git a/blockdag/reachabilitystore.go b/blockdag/reachabilitystore.go index 2a7cbb010..beeccb44d 100644 --- a/blockdag/reachabilitystore.go +++ b/blockdag/reachabilitystore.go @@ -137,7 +137,7 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err return err } - hash, err := daghash.NewHash(key) + hash, err := daghash.NewHash(key.Suffix()) if err != nil { return err } @@ 
-155,7 +155,7 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C return err } - hash, err := daghash.NewHash(key) + hash, err := daghash.NewHash(key.Suffix()) if err != nil { return err } diff --git a/database/bucket.go b/database/bucket.go deleted file mode 100644 index 8789a1a4d..000000000 --- a/database/bucket.go +++ /dev/null @@ -1,51 +0,0 @@ -package database - -import "bytes" - -var separator = []byte("/") - -// Bucket is a helper type meant to combine buckets, -// sub-buckets, and keys into a single full key-value -// database key. -type Bucket struct { - path [][]byte -} - -// MakeBucket creates a new Bucket using the given path -// of buckets. -func MakeBucket(path ...[]byte) *Bucket { - return &Bucket{path: path} -} - -// Bucket returns the sub-bucket of the current bucket -// defined by bucketBytes. -func (b *Bucket) Bucket(bucketBytes []byte) *Bucket { - newPath := make([][]byte, len(b.path)+1) - copy(newPath, b.path) - copy(newPath[len(b.path):], [][]byte{bucketBytes}) - - return MakeBucket(newPath...) -} - -// Key returns the key inside of the current bucket. -func (b *Bucket) Key(key []byte) []byte { - bucketPath := b.Path() - - fullKeyLength := len(bucketPath) + len(key) - fullKey := make([]byte, fullKeyLength) - copy(fullKey, bucketPath) - copy(fullKey[len(bucketPath):], key) - - return fullKey -} - -// Path returns the full path of the current bucket. -func (b *Bucket) Path() []byte { - bucketPath := bytes.Join(b.path, separator) - - bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(separator)) - copy(bucketPathWithFinalSeparator, bucketPath) - copy(bucketPathWithFinalSeparator[len(bucketPath):], separator) - - return bucketPathWithFinalSeparator -} diff --git a/database/cursor.go b/database/cursor.go index d7fc1af4c..4f2d1a9e6 100644 --- a/database/cursor.go +++ b/database/cursor.go @@ -13,13 +13,13 @@ type Cursor interface { // Seek moves the iterator to the first key/value pair whose key is greater // than or equal to the given key. It returns ErrNotFound if such pair does not // exist. - Seek(key []byte) error + Seek(key *Key) error // Key returns the key of the current key/value pair, or ErrNotFound if done. // Note that the key is trimmed to not include the prefix the cursor was opened // with. The caller should not modify the contents of the returned slice, and // its contents may change on the next call to Next. - Key() ([]byte, error) + Key() (*Key, error) // Value returns the value of the current key/value pair, or ErrNotFound if done. // The caller should not modify the contents of the returned slice, and its diff --git a/database/dataaccessor.go b/database/dataaccessor.go index ab8d61ad7..1af9cca5c 100644 --- a/database/dataaccessor.go +++ b/database/dataaccessor.go @@ -5,19 +5,19 @@ package database type DataAccessor interface { // Put sets the value for the given key. It overwrites // any previous value for that key. - Put(key []byte, value []byte) error + Put(key *Key, value []byte) error // Get gets the value for the given key. It returns // ErrNotFound if the given key does not exist. - Get(key []byte) ([]byte, error) + Get(key *Key) ([]byte, error) // Has returns true if the database does contains the // given key. - Has(key []byte) (bool, error) + Has(key *Key) (bool, error) // Delete deletes the value for the given key. Will not // return an error if the key doesn't exist. - Delete(key []byte) error + Delete(key *Key) error // AppendToStore appends the given data to the store // defined by storeName. 
This function returns a serialized @@ -32,5 +32,5 @@ type DataAccessor interface { RetrieveFromStore(storeName string, location []byte) ([]byte, error) // Cursor begins a new cursor over the given bucket. - Cursor(bucket []byte) (Cursor, error) + Cursor(bucket *Bucket) (Cursor, error) } diff --git a/database/ffldb/ffldb.go b/database/ffldb/ffldb.go index 0e07f079d..634ac35db 100644 --- a/database/ffldb/ffldb.go +++ b/database/ffldb/ffldb.go @@ -59,28 +59,28 @@ func (db *ffldb) Close() error { // Put sets the value for the given key. It overwrites // any previous value for that key. // This method is part of the DataAccessor interface. -func (db *ffldb) Put(key []byte, value []byte) error { +func (db *ffldb) Put(key *database.Key, value []byte) error { return db.levelDB.Put(key, value) } // Get gets the value for the given key. It returns // ErrNotFound if the given key does not exist. // This method is part of the DataAccessor interface. -func (db *ffldb) Get(key []byte) ([]byte, error) { +func (db *ffldb) Get(key *database.Key) ([]byte, error) { return db.levelDB.Get(key) } // Has returns true if the database does contains the // given key. // This method is part of the DataAccessor interface. -func (db *ffldb) Has(key []byte) (bool, error) { +func (db *ffldb) Has(key *database.Key) (bool, error) { return db.levelDB.Has(key) } // Delete deletes the value for the given key. Will not // return an error if the key doesn't exist. // This method is part of the DataAccessor interface. -func (db *ffldb) Delete(key []byte) error { +func (db *ffldb) Delete(key *database.Key) error { return db.levelDB.Delete(key) } @@ -155,8 +155,8 @@ func (db *ffldb) RetrieveFromStore(storeName string, location []byte) ([]byte, e // Cursor begins a new cursor over the given bucket. // This method is part of the DataAccessor interface. -func (db *ffldb) Cursor(bucket []byte) (database.Cursor, error) { - ldbCursor := db.levelDB.Cursor(bucket) +func (db *ffldb) Cursor(bucket *database.Bucket) (database.Cursor, error) { + ldbCursor := db.levelDB.Cursor(bucket.Path()) return ldbCursor, nil } diff --git a/database/ffldb/initialize.go b/database/ffldb/initialize.go index d3eee6db2..599e5cda5 100644 --- a/database/ffldb/initialize.go +++ b/database/ffldb/initialize.go @@ -33,7 +33,7 @@ func (db *ffldb) flatFiles() (map[string][]byte, error) { if err != nil { return nil, err } - storeName := string(storeNameKey) + storeName := string(storeNameKey.Suffix()) currentLocation, err := flatFilesCursor.Value() if err != nil { diff --git a/database/ffldb/ldb/cursor.go b/database/ffldb/ldb/cursor.go index d1ea0248f..bc9a1d73c 100644 --- a/database/ffldb/ldb/cursor.go +++ b/database/ffldb/ldb/cursor.go @@ -2,7 +2,6 @@ package ldb import ( "bytes" - "encoding/hex" "github.com/kaspanet/kaspad/database" "github.com/pkg/errors" "github.com/syndtr/goleveldb/leveldb/iterator" @@ -48,14 +47,14 @@ func (c *LevelDBCursor) First() bool { // Seek moves the iterator to the first key/value pair whose key is greater // than or equal to the given key. It returns ErrNotFound if such pair does not // exist. 
-func (c *LevelDBCursor) Seek(key []byte) error { +func (c *LevelDBCursor) Seek(key *database.Key) error { if c.isClosed { return errors.New("cannot seek a closed cursor") } notFoundErr := errors.Wrapf(database.ErrNotFound, "key %s not "+ - "found", hex.EncodeToString(key)) - found := c.ldbIterator.Seek(key) + "found", key) + found := c.ldbIterator.Seek(key.Bytes()) if !found { return notFoundErr } @@ -65,7 +64,7 @@ func (c *LevelDBCursor) Seek(key []byte) error { if currentKey == nil { return notFoundErr } - if !bytes.Equal(currentKey, key) { + if !bytes.Equal(currentKey, key.Bytes()) { return notFoundErr } @@ -76,7 +75,7 @@ func (c *LevelDBCursor) Seek(key []byte) error { // Note that the key is trimmed to not include the prefix the cursor was opened // with. The caller should not modify the contents of the returned slice, and // its contents may change on the next call to Next. -func (c *LevelDBCursor) Key() ([]byte, error) { +func (c *LevelDBCursor) Key() (*database.Key, error) { if c.isClosed { return nil, errors.New("cannot get the key of a closed cursor") } @@ -85,8 +84,8 @@ func (c *LevelDBCursor) Key() ([]byte, error) { return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ "key of a done cursor") } - key := bytes.TrimPrefix(fullKeyPath, c.prefix) - return key, nil + suffix := bytes.TrimPrefix(fullKeyPath, c.prefix) + return database.MakeBucket(c.prefix).Key(suffix), nil } // Value returns the value of the current key/value pair, or ErrNotFound if done. diff --git a/database/ffldb/ldb/leveldb.go b/database/ffldb/ldb/leveldb.go index 8fde2e22b..0b3f08e51 100644 --- a/database/ffldb/ldb/leveldb.go +++ b/database/ffldb/ldb/leveldb.go @@ -1,7 +1,6 @@ package ldb import ( - "encoding/hex" "github.com/kaspanet/kaspad/database" "github.com/pkg/errors" "github.com/syndtr/goleveldb/leveldb" @@ -52,19 +51,19 @@ func (db *LevelDB) Close() error { // Put sets the value for the given key. It overwrites // any previous value for that key. -func (db *LevelDB) Put(key []byte, value []byte) error { - err := db.ldb.Put(key, value, nil) +func (db *LevelDB) Put(key *database.Key, value []byte) error { + err := db.ldb.Put(key.Bytes(), value, nil) return errors.WithStack(err) } // Get gets the value for the given key. It returns // ErrNotFound if the given key does not exist. -func (db *LevelDB) Get(key []byte) ([]byte, error) { - data, err := db.ldb.Get(key, nil) +func (db *LevelDB) Get(key *database.Key) ([]byte, error) { + data, err := db.ldb.Get(key.Bytes(), nil) if err != nil { if errors.Is(err, leveldb.ErrNotFound) { return nil, errors.Wrapf(database.ErrNotFound, - "key %s not found", hex.EncodeToString(key)) + "key %s not found", key) } return nil, errors.WithStack(err) } @@ -73,8 +72,8 @@ func (db *LevelDB) Get(key []byte) ([]byte, error) { // Has returns true if the database does contains the // given key. -func (db *LevelDB) Has(key []byte) (bool, error) { - exists, err := db.ldb.Has(key, nil) +func (db *LevelDB) Has(key *database.Key) (bool, error) { + exists, err := db.ldb.Has(key.Bytes(), nil) if err != nil { return false, errors.WithStack(err) } @@ -83,7 +82,7 @@ func (db *LevelDB) Has(key []byte) (bool, error) { // Delete deletes the value for the given key. Will not // return an error if the key doesn't exist. 
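// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch. With
// the typed API, call sites build keys through buckets instead of
// passing raw byte slices (mirroring the leveldb_test.go changes
// below); db here is a hypothetical *LevelDB handle:
key := database.MakeBucket().Key([]byte("key")) // a top-level key
if err := db.Put(key, []byte("Hello world!")); err != nil {
	return err
}
value, err := db.Get(key) // wraps database.ErrNotFound if the key is absent
if err != nil {
	return err
}
fmt.Printf("%s\n", value)
// ----------------------------------------------------------------------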
-func (db *LevelDB) Delete(key []byte) error { - err := db.ldb.Delete(key, nil) +func (db *LevelDB) Delete(key *database.Key) error { + err := db.ldb.Delete(key.Bytes(), nil) return errors.WithStack(err) } diff --git a/database/ffldb/ldb/leveldb_test.go b/database/ffldb/ldb/leveldb_test.go index b7c70a32d..5e44d4b1b 100644 --- a/database/ffldb/ldb/leveldb_test.go +++ b/database/ffldb/ldb/leveldb_test.go @@ -28,7 +28,7 @@ func TestLevelDBSanity(t *testing.T) { }() // Put something into the db - key := []byte("key") + key := database.MakeBucket().Key([]byte("key")) putData := []byte("Hello world!") err = ldb.Put(key, putData) if err != nil { @@ -80,7 +80,7 @@ func TestLevelDBTransactionSanity(t *testing.T) { } // Put something into the transaction - key := []byte("key") + key := database.MakeBucket().Key([]byte("key")) putData := []byte("Hello world!") err = tx.Put(key, putData) if err != nil { @@ -124,7 +124,7 @@ func TestLevelDBTransactionSanity(t *testing.T) { // Case 2. Write directly to the DB and then read from a tx // Put something into the db - key = []byte("key2") + key = database.MakeBucket().Key([]byte("key2")) putData = []byte("Goodbye world!") err = ldb.Put(key, putData) if err != nil { diff --git a/database/ffldb/ldb/transaction.go b/database/ffldb/ldb/transaction.go index edaa41e5b..a2df308ae 100644 --- a/database/ffldb/ldb/transaction.go +++ b/database/ffldb/ldb/transaction.go @@ -1,7 +1,6 @@ package ldb import ( - "encoding/hex" "github.com/kaspanet/kaspad/database" "github.com/pkg/errors" "github.com/syndtr/goleveldb/leveldb" @@ -82,27 +81,27 @@ func (tx *LevelDBTransaction) RollbackUnlessClosed() error { // Put sets the value for the given key. It overwrites // any previous value for that key. -func (tx *LevelDBTransaction) Put(key []byte, value []byte) error { +func (tx *LevelDBTransaction) Put(key *database.Key, value []byte) error { if tx.isClosed { return errors.New("cannot put into a closed transaction") } - tx.batch.Put(key, value) + tx.batch.Put(key.Bytes(), value) return nil } // Get gets the value for the given key. It returns // ErrNotFound if the given key does not exist. -func (tx *LevelDBTransaction) Get(key []byte) ([]byte, error) { +func (tx *LevelDBTransaction) Get(key *database.Key) ([]byte, error) { if tx.isClosed { return nil, errors.New("cannot get from a closed transaction") } - data, err := tx.snapshot.Get(key, nil) + data, err := tx.snapshot.Get(key.Bytes(), nil) if err != nil { if errors.Is(err, leveldb.ErrNotFound) { return nil, errors.Wrapf(database.ErrNotFound, - "key %s not found", hex.EncodeToString(key)) + "key %s not found", key) } return nil, errors.WithStack(err) } @@ -111,30 +110,30 @@ func (tx *LevelDBTransaction) Get(key []byte) ([]byte, error) { // Has returns true if the database does contains the // given key. -func (tx *LevelDBTransaction) Has(key []byte) (bool, error) { +func (tx *LevelDBTransaction) Has(key *database.Key) (bool, error) { if tx.isClosed { return false, errors.New("cannot has from a closed transaction") } - return tx.snapshot.Has(key, nil) + return tx.snapshot.Has(key.Bytes(), nil) } // Delete deletes the value for the given key. Will not // return an error if the key doesn't exist. -func (tx *LevelDBTransaction) Delete(key []byte) error { +func (tx *LevelDBTransaction) Delete(key *database.Key) error { if tx.isClosed { return errors.New("cannot delete from a closed transaction") } - tx.batch.Delete(key) + tx.batch.Delete(key.Bytes()) return nil } // Cursor begins a new cursor over the given bucket. 
-func (tx *LevelDBTransaction) Cursor(bucket []byte) (*LevelDBCursor, error) { +func (tx *LevelDBTransaction) Cursor(bucket *database.Bucket) (*LevelDBCursor, error) { if tx.isClosed { return nil, errors.New("cannot open a cursor from a closed transaction") } - return tx.db.Cursor(bucket), nil + return tx.db.Cursor(bucket.Path()), nil } diff --git a/database/ffldb/transaction.go b/database/ffldb/transaction.go index ca63120e7..d30ab7b2f 100644 --- a/database/ffldb/transaction.go +++ b/database/ffldb/transaction.go @@ -20,28 +20,28 @@ type transaction struct { // Put sets the value for the given key. It overwrites // any previous value for that key. // This method is part of the DataAccessor interface. -func (tx *transaction) Put(key []byte, value []byte) error { +func (tx *transaction) Put(key *database.Key, value []byte) error { return tx.ldbTx.Put(key, value) } // Get gets the value for the given key. It returns // ErrNotFound if the given key does not exist. // This method is part of the DataAccessor interface. -func (tx *transaction) Get(key []byte) ([]byte, error) { +func (tx *transaction) Get(key *database.Key) ([]byte, error) { return tx.ldbTx.Get(key) } // Has returns true if the database does contains the // given key. // This method is part of the DataAccessor interface. -func (tx *transaction) Has(key []byte) (bool, error) { +func (tx *transaction) Has(key *database.Key) (bool, error) { return tx.ldbTx.Has(key) } // Delete deletes the value for the given key. Will not // return an error if the key doesn't exist. // This method is part of the DataAccessor interface. -func (tx *transaction) Delete(key []byte) error { +func (tx *transaction) Delete(key *database.Key) error { return tx.ldbTx.Delete(key) } @@ -66,7 +66,7 @@ func (tx *transaction) RetrieveFromStore(storeName string, location []byte) ([]b // Cursor begins a new cursor over the given bucket. // This method is part of the DataAccessor interface. -func (tx *transaction) Cursor(bucket []byte) (database.Cursor, error) { +func (tx *transaction) Cursor(bucket *database.Bucket) (database.Cursor, error) { return tx.ldbTx.Cursor(bucket) } diff --git a/database/keys.go b/database/keys.go new file mode 100644 index 000000000..6b9aabdb8 --- /dev/null +++ b/database/keys.go @@ -0,0 +1,85 @@ +package database + +import ( + "bytes" + "encoding/hex" +) + +var bucketSeparator = []byte("/") + +// Key is a helper type meant to combine prefix +// and suffix into a single database key. +type Key struct { + bucket *Bucket + suffix []byte +} + +// Bytes returns the full key bytes that are consisted +// from the bucket path concatenated to the suffix. +func (k *Key) Bytes() []byte { + bucketPath := k.bucket.Path() + keyBytes := make([]byte, len(bucketPath)+len(k.suffix)) + copy(keyBytes, bucketPath) + copy(keyBytes[len(bucketPath):], k.suffix) + return keyBytes +} + +func (k *Key) String() string { + return hex.EncodeToString(k.Bytes()) +} + +// Bucket returns the key bucket. +func (k *Key) Bucket() *Bucket { + return k.bucket +} + +// Suffix returns the key suffix. +func (k *Key) Suffix() []byte { + return k.suffix +} + +// newKey returns a new key composed +// of the given bucket and suffix +func newKey(bucket *Bucket, suffix []byte) *Key { + return &Key{bucket: bucket, suffix: suffix} +} + +// Bucket is a helper type meant to combine buckets +// and sub-buckets that can be used to create database +// keys and prefix-based cursors. 
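// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch.
// Buckets join their path segments with the "/" separator and append a
// trailing separator, so keys compose like filesystem paths (exactly
// what keys_test.go below asserts):
bucket := database.MakeBucket([]byte("hello"), []byte("world"))
key := bucket.Key([]byte("test"))
fmt.Println(string(bucket.Path())) // "hello/world/"
fmt.Println(string(key.Bytes()))   // "hello/world/test"
fmt.Println(string(key.Suffix()))  // "test"
fmt.Println(key.String())          // the same bytes, hex-encoded
// ----------------------------------------------------------------------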
+type Bucket struct { + path [][]byte +} + +// MakeBucket creates a new Bucket using the given path +// of buckets. +func MakeBucket(path ...[]byte) *Bucket { + return &Bucket{path: path} +} + +// Bucket returns the sub-bucket of the current bucket +// defined by bucketBytes. +func (b *Bucket) Bucket(bucketBytes []byte) *Bucket { + newPath := make([][]byte, len(b.path)+1) + copy(newPath, b.path) + copy(newPath[len(b.path):], [][]byte{bucketBytes}) + + return MakeBucket(newPath...) +} + +// Key returns a key in the current bucket with the +// given suffix. +func (b *Bucket) Key(suffix []byte) *Key { + return newKey(b, suffix) +} + +// Path returns the full path of the current bucket. +func (b *Bucket) Path() []byte { + bucketPath := bytes.Join(b.path, bucketSeparator) + + bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(bucketSeparator)) + copy(bucketPathWithFinalSeparator, bucketPath) + copy(bucketPathWithFinalSeparator[len(bucketPath):], bucketSeparator) + + return bucketPathWithFinalSeparator +} diff --git a/database/bucket_test.go b/database/keys_test.go similarity index 74% rename from database/bucket_test.go rename to database/keys_test.go index 8bc79ab84..4c0e8b360 100644 --- a/database/bucket_test.go +++ b/database/keys_test.go @@ -1,6 +1,7 @@ package database import ( + "bytes" "reflect" "testing" ) @@ -45,17 +46,26 @@ func TestBucketKey(t *testing.T) { tests := []struct { bucketByteSlices [][]byte key []byte - expectedKey []byte + expectedKeyBytes []byte + expectedKey *Key }{ { bucketByteSlices: [][]byte{[]byte("hello")}, key: []byte("test"), - expectedKey: []byte("hello/test"), + expectedKeyBytes: []byte("hello/test"), + expectedKey: &Key{ + bucket: MakeBucket([]byte("hello")), + suffix: []byte("test"), + }, }, { bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")}, key: []byte("test"), - expectedKey: []byte("hello/world/test"), + expectedKeyBytes: []byte("hello/world/test"), + expectedKey: &Key{ + bucket: MakeBucket([]byte("hello"), []byte("world")), + suffix: []byte("test"), + }, }, } @@ -63,7 +73,11 @@ func TestBucketKey(t *testing.T) { resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key) if !reflect.DeepEqual(resultKey, test.expectedKey) { t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s", - string(test.expectedKey), string(resultKey)) + test.expectedKeyBytes, resultKey) + } + if !bytes.Equal(resultKey.Bytes(), test.expectedKeyBytes) { + t.Errorf("TestBucketKey: got wrong key bytes. 
Want: %s, got: %s", + test.expectedKeyBytes, resultKey.Bytes()) } } } diff --git a/dbaccess/acceptanceindex.go b/dbaccess/acceptanceindex.go index bd57d6faf..a5dee53c3 100644 --- a/dbaccess/acceptanceindex.go +++ b/dbaccess/acceptanceindex.go @@ -10,7 +10,7 @@ var ( acceptanceIndexBucket = database.MakeBucket([]byte("acceptance-index")) ) -func acceptanceIndexKey(hash *daghash.Hash) []byte { +func acceptanceIndexKey(hash *daghash.Hash) *database.Key { return acceptanceIndexBucket.Key(hash[:]) } diff --git a/dbaccess/block.go b/dbaccess/block.go index 440757718..04b38f4ef 100644 --- a/dbaccess/block.go +++ b/dbaccess/block.go @@ -14,7 +14,7 @@ var ( blockLocationsBucket = database.MakeBucket([]byte("block-locations")) ) -func blockLocationKey(hash *daghash.Hash) []byte { +func blockLocationKey(hash *daghash.Hash) *database.Key { return blockLocationsBucket.Key(hash[:]) } diff --git a/dbaccess/blockindex.go b/dbaccess/blockindex.go index 213b7c208..e32c1de47 100644 --- a/dbaccess/blockindex.go +++ b/dbaccess/blockindex.go @@ -30,7 +30,7 @@ func BlockIndexCursor(context Context) (database.Cursor, error) { return nil, err } - return accessor.Cursor(blockIndexBucket.Path()) + return accessor.Cursor(blockIndexBucket) } // BlockIndexCursorFrom opens a cursor over blocks-index blocks diff --git a/dbaccess/common.go b/dbaccess/common.go index 25a8084cc..25c0027dd 100644 --- a/dbaccess/common.go +++ b/dbaccess/common.go @@ -11,8 +11,8 @@ func clearBucket(dbTx *TxContext, bucket *database.Bucket) error { // Collect all of the keys before deleting them. We do this // as to not modify the cursor while we're still iterating // over it. - keys := make([][]byte, 0) - cursor, err := accessor.Cursor(bucket.Path()) + keys := make([]*database.Key, 0) + cursor, err := accessor.Cursor(bucket) if err != nil { return err } diff --git a/dbaccess/dagstate.go b/dbaccess/dagstate.go index b1e50e166..932a9848e 100644 --- a/dbaccess/dagstate.go +++ b/dbaccess/dagstate.go @@ -1,7 +1,9 @@ package dbaccess +import "github.com/kaspanet/kaspad/database" + var ( - dagStateKey = []byte("dag-state") + dagStateKey = database.MakeBucket().Key([]byte("dag-state")) ) // StoreDAGState stores the DAG state in the database. diff --git a/dbaccess/fee_data.go b/dbaccess/fee_data.go index 491eab05e..7fe854ef3 100644 --- a/dbaccess/fee_data.go +++ b/dbaccess/fee_data.go @@ -8,7 +8,7 @@ import ( var feeBucket = database.MakeBucket([]byte("fees")) -func feeDataKey(hash *daghash.Hash) []byte { +func feeDataKey(hash *daghash.Hash) *database.Key { return feeBucket.Key(hash[:]) } diff --git a/dbaccess/multiset.go b/dbaccess/multiset.go index c1b74e552..f26446a80 100644 --- a/dbaccess/multiset.go +++ b/dbaccess/multiset.go @@ -7,7 +7,7 @@ import ( var multisetBucket = database.MakeBucket([]byte("multiset")) -func multisetKey(hash *daghash.Hash) []byte { +func multisetKey(hash *daghash.Hash) *database.Key { return multisetBucket.Key(hash[:]) } @@ -19,7 +19,7 @@ func MultisetCursor(context Context) (database.Cursor, error) { return nil, err } - return accessor.Cursor(multisetBucket.Path()) + return accessor.Cursor(multisetBucket) } // StoreMultiset stores the multiset of a block by its hash. 
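// ----------------------------------------------------------------------
// Editor's aside — not part of the patch. Every dbaccess sub-store now
// follows the same shape: one package-level bucket plus a typed key
// helper. A sketch of the convention (exampleBucket is a hypothetical
// name; compare multisetKey above and reachabilityKey below):
var exampleBucket = database.MakeBucket([]byte("example"))

func exampleKey(hash *daghash.Hash) *database.Key {
	return exampleBucket.Key(hash[:])
}
// ----------------------------------------------------------------------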
diff --git a/dbaccess/reachability.go b/dbaccess/reachability.go index aed017d80..68401448e 100644 --- a/dbaccess/reachability.go +++ b/dbaccess/reachability.go @@ -7,7 +7,7 @@ import ( var reachabilityDataBucket = database.MakeBucket([]byte("reachability")) -func reachabilityKey(hash *daghash.Hash) []byte { +func reachabilityKey(hash *daghash.Hash) *database.Key { return reachabilityDataBucket.Key(hash[:]) } @@ -19,7 +19,7 @@ func ReachabilityDataCursor(context Context) (database.Cursor, error) { return nil, err } - return accessor.Cursor(reachabilityDataBucket.Path()) + return accessor.Cursor(reachabilityDataBucket) } // StoreReachabilityData stores the reachability data of a block by its hash. diff --git a/dbaccess/subnetwork.go b/dbaccess/subnetwork.go index 1d808f338..6b6c739df 100644 --- a/dbaccess/subnetwork.go +++ b/dbaccess/subnetwork.go @@ -7,7 +7,7 @@ import ( var subnetworkBucket = database.MakeBucket([]byte("subnetworks")) -func subnetworkKey(subnetworkID *subnetworkid.SubnetworkID) []byte { +func subnetworkKey(subnetworkID *subnetworkid.SubnetworkID) *database.Key { return subnetworkBucket.Key(subnetworkID[:]) } diff --git a/dbaccess/utxo.go b/dbaccess/utxo.go index 83050a5e8..555b79dbb 100644 --- a/dbaccess/utxo.go +++ b/dbaccess/utxo.go @@ -8,7 +8,7 @@ var ( utxoBucket = database.MakeBucket([]byte("utxo")) ) -func utxoKey(outpointKey []byte) []byte { +func utxoKey(outpointKey []byte) *database.Key { return utxoBucket.Key(outpointKey) } @@ -44,5 +44,5 @@ func UTXOSetCursor(context Context) (database.Cursor, error) { return nil, err } - return accessor.Cursor(utxoBucket.Path()) + return accessor.Cursor(utxoBucket) } diff --git a/dbaccess/utxodiff.go b/dbaccess/utxodiff.go index fc3c9ece3..be4f64c86 100644 --- a/dbaccess/utxodiff.go +++ b/dbaccess/utxodiff.go @@ -8,7 +8,7 @@ import ( var utxoDiffsBucket = database.MakeBucket([]byte("utxo-diffs")) -func utxoDiffKey(hash *daghash.Hash) []byte { +func utxoDiffKey(hash *daghash.Hash) *database.Key { return utxoDiffsBucket.Key(hash[:]) } From fe91b4c8780100907a367bf3de594bb4689f1857 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 12 Apr 2020 09:25:40 +0300 Subject: [PATCH 07/77] [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) --- database/ffldb/ffldb.go | 2 +- database/ffldb/initialize.go | 3 +-- database/ffldb/ldb/cursor.go | 12 ++++++------ database/ffldb/ldb/transaction.go | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/database/ffldb/ffldb.go b/database/ffldb/ffldb.go index 634ac35db..31b4a20f8 100644 --- a/database/ffldb/ffldb.go +++ b/database/ffldb/ffldb.go @@ -156,7 +156,7 @@ func (db *ffldb) RetrieveFromStore(storeName string, location []byte) ([]byte, e // Cursor begins a new cursor over the given bucket. // This method is part of the DataAccessor interface. 
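// ----------------------------------------------------------------------
// Editor's aside — not part of the patch. Passing the *database.Bucket
// itself (rather than its Path() bytes) lets the cursor rebuild fully
// typed keys for whatever it lands on: cursor.go below trims the bucket
// path off the raw LevelDB key and re-attaches the suffix to the
// original bucket:
//
//	suffix := bytes.TrimPrefix(fullKeyPath, c.bucket.Path())
//	return c.bucket.Key(suffix), nil
// ----------------------------------------------------------------------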
func (db *ffldb) Cursor(bucket *database.Bucket) (database.Cursor, error) { - ldbCursor := db.levelDB.Cursor(bucket.Path()) + ldbCursor := db.levelDB.Cursor(bucket) return ldbCursor, nil } diff --git a/database/ffldb/initialize.go b/database/ffldb/initialize.go index 599e5cda5..e403ca3e8 100644 --- a/database/ffldb/initialize.go +++ b/database/ffldb/initialize.go @@ -18,8 +18,7 @@ func (db *ffldb) initialize() error { } func (db *ffldb) flatFiles() (map[string][]byte, error) { - flatFilesBucketPath := flatFilesBucket.Path() - flatFilesCursor := db.levelDB.Cursor(flatFilesBucketPath) + flatFilesCursor := db.levelDB.Cursor(flatFilesBucket) defer func() { err := flatFilesCursor.Close() if err != nil { diff --git a/database/ffldb/ldb/cursor.go b/database/ffldb/ldb/cursor.go index bc9a1d73c..e2b17ed79 100644 --- a/database/ffldb/ldb/cursor.go +++ b/database/ffldb/ldb/cursor.go @@ -11,17 +11,17 @@ import ( // LevelDBCursor is a thin wrapper around native leveldb iterators. type LevelDBCursor struct { ldbIterator iterator.Iterator - prefix []byte + bucket *database.Bucket isClosed bool } // Cursor begins a new cursor over the given prefix. -func (db *LevelDB) Cursor(prefix []byte) *LevelDBCursor { - ldbIterator := db.ldb.NewIterator(util.BytesPrefix(prefix), nil) +func (db *LevelDB) Cursor(bucket *database.Bucket) *LevelDBCursor { + ldbIterator := db.ldb.NewIterator(util.BytesPrefix(bucket.Path()), nil) return &LevelDBCursor{ ldbIterator: ldbIterator, - prefix: prefix, + bucket: bucket, isClosed: false, } } @@ -84,8 +84,8 @@ func (c *LevelDBCursor) Key() (*database.Key, error) { return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ "key of a done cursor") } - suffix := bytes.TrimPrefix(fullKeyPath, c.prefix) - return database.MakeBucket(c.prefix).Key(suffix), nil + suffix := bytes.TrimPrefix(fullKeyPath, c.bucket.Path()) + return c.bucket.Key(suffix), nil } // Value returns the value of the current key/value pair, or ErrNotFound if done. 
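// ----------------------------------------------------------------------
// Editor's aside — an illustrative sketch, not part of the patch: a
// typical iteration over a single bucket with the cursor API above
// (db is an *ldb.LevelDB, whose Cursor returns no error; handle is a
// hypothetical callback):
cursor := db.Cursor(database.MakeBucket([]byte("utxo")))
defer cursor.Close()
for cursor.Next() {
	key, err := cursor.Key() // already trimmed to a key inside the bucket
	if err != nil {
		return err
	}
	value, err := cursor.Value()
	if err != nil {
		return err
	}
	if err := handle(key.Suffix(), value); err != nil {
		return err
	}
}
// ----------------------------------------------------------------------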
diff --git a/database/ffldb/ldb/transaction.go b/database/ffldb/ldb/transaction.go index a2df308ae..41c3866da 100644 --- a/database/ffldb/ldb/transaction.go +++ b/database/ffldb/ldb/transaction.go @@ -135,5 +135,5 @@ func (tx *LevelDBTransaction) Cursor(bucket *database.Bucket) (*LevelDBCursor, e return nil, errors.New("cannot open a cursor from a closed transaction") } - return tx.db.Cursor(bucket.Path()), nil + return tx.db.Cursor(bucket), nil } From d015286f6536ef080439f32966e2bb9b772acd32 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 13 Apr 2020 12:28:59 +0300 Subject: [PATCH 08/77] [NOD-909] Add tests for double spends (#694) * [NOD-909] Add tests for double spends * [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument * [NOD-909] Use PrepareAndProcessBlockForTest where possible * [NOD-909] Use more meaningful names * [NOD-909] Change a comment * [NOD-909] Fix comment * [NOD-909] Fix comment --- blockdag/common_test.go | 20 +----- blockdag/dag_test.go | 145 +++++++++++++++++++++++++++++++++++--- blockdag/ghostdag_test.go | 10 +-- blockdag/test_utils.go | 23 ++++++ blockdag/validate_test.go | 6 +- mempool/mempool_test.go | 51 ++++++++++++++ 6 files changed, 221 insertions(+), 34 deletions(-) diff --git a/blockdag/common_test.go b/blockdag/common_test.go index 8a245a748..46155e3ff 100644 --- a/blockdag/common_test.go +++ b/blockdag/common_test.go @@ -172,28 +172,12 @@ func checkRuleError(gotErr, wantErr error) error { return nil } -func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock { +func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock { parentHashes := make([]*daghash.Hash, len(parents)) for i, parent := range parents { parentHashes[i] = parent.BlockHash() } - daghash.Sort(parentHashes) - block, err := PrepareBlockForTest(dag, parentHashes, nil) - if err != nil { - t.Fatalf("error in PrepareBlockForTest: %s", err) - } - utilBlock := util.NewBlock(block) - isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck) - if err != nil { - t.Fatalf("unexpected error in ProcessBlock: %s", err) - } - if isDelayed { - t.Fatalf("block is too far in the future") - } - if isOrphan { - t.Fatalf("block was unexpectedly orphan") - } - return block + return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil) } func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode { diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index 450e12873..4a6b9dea6 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -688,7 +688,7 @@ func TestConfirmations(t *testing.T) { chainBlocks := make([]*wire.MsgBlock, 5) chainBlocks[0] = dag.dagParams.GenesisBlock for i := uint32(1); i < 5; i++ { - chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1]) + chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1]) } // Make sure that each one of the chain blocks has the expected confirmations number @@ -707,8 +707,8 @@ func TestConfirmations(t *testing.T) { branchingBlocks := make([]*wire.MsgBlock, 2) // Add two branching blocks - branchingBlocks[0] = prepareAndProcessBlock(t, dag, chainBlocks[1]) - branchingBlocks[1] = prepareAndProcessBlock(t, dag, branchingBlocks[0]) + branchingBlocks[0] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[1]) + branchingBlocks[1] = prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingBlocks[0]) // Check that the genesis has a confirmations 
number == len(chainBlocks) genesisConfirmations, err = dag.blockConfirmations(dag.genesis) @@ -738,7 +738,7 @@ func TestConfirmations(t *testing.T) { // Generate 100 blocks to force the "main" chain to become red branchingChainTip := branchingBlocks[1] for i := uint32(0); i < 100; i++ { - nextBranchingChainTip := prepareAndProcessBlock(t, dag, branchingChainTip) + nextBranchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingChainTip) branchingChainTip = nextBranchingChainTip } @@ -797,7 +797,7 @@ func TestAcceptingBlock(t *testing.T) { chainBlocks := make([]*wire.MsgBlock, numChainBlocks) chainBlocks[0] = dag.dagParams.GenesisBlock for i := uint32(1); i <= numChainBlocks-1; i++ { - chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1]) + chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1]) } // Make sure that each chain block (including the genesis) is accepted by its child @@ -825,7 +825,7 @@ func TestAcceptingBlock(t *testing.T) { // Generate a chain tip that will be in the anticone of the selected tip and // in dag.virtual.blues. - branchingChainTip := prepareAndProcessBlock(t, dag, chainBlocks[len(chainBlocks)-3]) + branchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[len(chainBlocks)-3]) // Make sure that branchingChainTip is not in the selected parent chain isBranchingChainTipInSelectedParentChain, err := dag.IsInSelectedParentChain(branchingChainTip.BlockHash()) @@ -863,7 +863,7 @@ func TestAcceptingBlock(t *testing.T) { intersectionBlock := chainBlocks[1] sideChainTip := intersectionBlock for i := 0; i < len(chainBlocks)-3; i++ { - sideChainTip = prepareAndProcessBlock(t, dag, sideChainTip) + sideChainTip = prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip) } // Make sure that the accepting block of the parent of the branching block didn't change @@ -879,7 +879,7 @@ func TestAcceptingBlock(t *testing.T) { // Make sure that a block that is found in the red set of the selected tip // doesn't have an accepting block - prepareAndProcessBlock(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1]) + prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1]) sideChainTipAcceptingBlock, err := acceptingBlockByMsgBlock(sideChainTip) if err != nil { @@ -1117,3 +1117,132 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) { } } } + +func TestDoubleSpends(t *testing.T) { + params := dagconfig.SimnetParams + params.BlockCoinbaseMaturity = 0 + // Create a new database and dag instance to run tests against. 
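// ----------------------------------------------------------------------
// Editor's aside — not part of the patch. TestDoubleSpends below builds
// two conflicting transactions (tx1 and doubleSpendTx1 spend the same
// outpoint) and checks three consensus cases:
//  1. a block that repeats tx1 from its own past is rejected with
//     ErrOverwriteTx;
//  2. a block that double spends tx1's input from its past is rejected
//     with ErrMissingTxOut;
//  3. a block that double spends in its anticone (parallel history) is
//     accepted, since neither conflicting block is in the other's past.
// ----------------------------------------------------------------------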
+ dag, teardownFunc, err := DAGSetup("TestDoubleSpends", true, Config{ + DAGParams: ¶ms, + }) + if err != nil { + t.Fatalf("Failed to setup dag instance: %v", err) + } + defer teardownFunc() + + fundingBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{params.GenesisHash}, nil) + cbTx := fundingBlock.Transactions[0] + + signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil) + if err != nil { + t.Fatalf("Failed to build signature script: %s", err) + } + txIn := &wire.TxIn{ + PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}, + SignatureScript: signatureScript, + Sequence: wire.MaxTxInSequenceNum, + } + txOut := &wire.TxOut{ + ScriptPubKey: OpTrueScript, + Value: uint64(1), + } + tx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}) + + doubleSpendTxOut := &wire.TxOut{ + ScriptPubKey: OpTrueScript, + Value: uint64(2), + } + doubleSpendTx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{doubleSpendTxOut}) + + blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{tx1}) + + // Check that a block will be rejected if it has a transaction that already exists in its past. + anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil) + if err != nil { + t.Fatalf("PrepareBlockForTest: %v", err) + } + + // Manually add tx1. + anotherBlockWithTx1.Transactions = append(anotherBlockWithTx1.Transactions, tx1) + anotherBlockWithTx1UtilTxs := make([]*util.Tx, len(anotherBlockWithTx1.Transactions)) + for i, tx := range anotherBlockWithTx1.Transactions { + anotherBlockWithTx1UtilTxs[i] = util.NewTx(tx) + } + anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root() + + isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(anotherBlockWithTx1), BFNoPoWCheck) + if err == nil { + t.Errorf("ProcessBlock expected an error") + } else { + var ruleErr RuleError + if ok := errors.As(err, &ruleErr); ok { + if ruleErr.ErrorCode != ErrOverwriteTx { + t.Errorf("ProcessBlock expected an %v error code but got %v", ErrOverwriteTx, ruleErr.ErrorCode) + } + } else { + t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err) + } + } + if isDelayed { + t.Fatalf("ProcessBlock: anotherBlockWithTx1 " + + "is too far in the future") + } + if isOrphan { + t.Fatalf("ProcessBlock: anotherBlockWithTx1 got unexpectedly orphaned") + } + + // Check that a block will be rejected if it has a transaction that double spends + // a transaction from its past. + blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil) + if err != nil { + t.Fatalf("PrepareBlockForTest: %v", err) + } + + // Manually add a transaction that double spends the block past. 
+ blockWithDoubleSpendForTx1.Transactions = append(blockWithDoubleSpendForTx1.Transactions, doubleSpendTx1) + blockWithDoubleSpendForTx1UtilTxs := make([]*util.Tx, len(blockWithDoubleSpendForTx1.Transactions)) + for i, tx := range blockWithDoubleSpendForTx1.Transactions { + blockWithDoubleSpendForTx1UtilTxs[i] = util.NewTx(tx) + } + blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root() + + isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(blockWithDoubleSpendForTx1), BFNoPoWCheck) + if err == nil { + t.Errorf("ProcessBlock expected an error") + } else { + var ruleErr RuleError + if ok := errors.As(err, &ruleErr); ok { + if ruleErr.ErrorCode != ErrMissingTxOut { + t.Errorf("ProcessBlock expected an %v error code but got %v", ErrMissingTxOut, ruleErr.ErrorCode) + } + } else { + t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err) + } + } + if isDelayed { + t.Fatalf("ProcessBlock: blockWithDoubleSpendForTx1 " + + "is too far in the future") + } + if isOrphan { + t.Fatalf("ProcessBlock: blockWithDoubleSpendForTx1 got unexpectedly orphaned") + } + + blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{doubleSpendTx1}) + if err != nil { + t.Fatalf("PrepareBlockForTest: %v", err) + } + + // Check that a block will not get rejected if it has a transaction that double spends + // a transaction from its anticone. + isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(blockInAnticoneOfBlockWithTx1), BFNoPoWCheck) + if err != nil { + t.Fatalf("ProcessBlock: %v", err) + } + if isDelayed { + t.Fatalf("ProcessBlock: blockInAnticoneOfBlockWithTx1 " + + "is too far in the future") + } + if isOrphan { + t.Fatalf("ProcessBlock: blockInAnticoneOfBlockWithTx1 got unexpectedly orphaned") + } +} diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 80b7e24b7..920bff7fd 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -293,14 +293,14 @@ func TestBlueAnticoneSizeErrors(t *testing.T) { // Prepare a block chain with size K beginning with the genesis block currentBlockA := dag.dagParams.GenesisBlock for i := dagconfig.KType(0); i < dag.dagParams.K; i++ { - newBlock := prepareAndProcessBlock(t, dag, currentBlockA) + newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA) currentBlockA = newBlock } // Prepare another block chain with size K beginning with the genesis block currentBlockB := dag.dagParams.GenesisBlock for i := dagconfig.KType(0); i < dag.dagParams.K; i++ { - newBlock := prepareAndProcessBlock(t, dag, currentBlockB) + newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB) currentBlockB = newBlock } @@ -332,11 +332,11 @@ func TestGHOSTDAGErrors(t *testing.T) { defer teardownFunc() // Add two child blocks to the genesis - block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock) - block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock) + block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) + block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) // Add a child block to the previous two blocks - block3 := prepareAndProcessBlock(t, dag, block1, block2) + block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2) // Clear the reachability store dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{} diff --git a/blockdag/test_utils.go 
b/blockdag/test_utils.go index e40952a0e..012194d97 100644 --- a/blockdag/test_utils.go +++ b/blockdag/test_utils.go @@ -15,6 +15,7 @@ import ( "sort" "strings" "sync" + "testing" "github.com/kaspanet/kaspad/util/subnetworkid" @@ -279,6 +280,28 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio return block, nil } +// PrepareAndProcessBlockForTest prepares a block that points to the given parent +// hashes and processes it. +func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) *wire.MsgBlock { + daghash.Sort(parentHashes) + block, err := PrepareBlockForTest(dag, parentHashes, transactions) + if err != nil { + t.Fatalf("error in PrepareBlockForTest: %s", err) + } + utilBlock := util.NewBlock(block) + isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck) + if err != nil { + t.Fatalf("unexpected error in ProcessBlock: %s", err) + } + if isDelayed { + t.Fatalf("block is too far in the future") + } + if isOrphan { + t.Fatalf("block was unexpectedly orphan") + } + return block +} + // generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions. func generateDeterministicExtraNonceForTest() uint64 { extraNonceForTest++ diff --git a/blockdag/validate_test.go b/blockdag/validate_test.go index 4f65d3651..064387826 100644 --- a/blockdag/validate_test.go +++ b/blockdag/validate_test.go @@ -570,9 +570,9 @@ func TestValidateParents(t *testing.T) { } defer teardownFunc() - a := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock) - b := prepareAndProcessBlock(t, dag, a) - c := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock) + a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) + b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a) + c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock) aNode := nodeByMsgBlock(t, dag, a) bNode := nodeByMsgBlock(t, dag, b) diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 58e799608..1e8ae7337 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -795,6 +795,57 @@ func TestDoubleSpends(t *testing.T) { testPoolMembership(tc, tx2, false, true, false) } +func TestDoubleSpendsFromDAG(t *testing.T) { + tc, spendableOuts, teardownFunc, err := newPoolHarness(t, &dagconfig.SimnetParams, 2, "TestDoubleSpendsFromDAG") + if err != nil { + t.Fatalf("unable to create test pool: %v", err) + } + defer teardownFunc() + harness := tc.harness + + tx, err := harness.createTx(spendableOuts[0], uint64(txRelayFeeForTest), 1) + if err != nil { + t.Fatalf("unable to create transaction: %v", err) + } + + dag := harness.txPool.cfg.DAG + blockdag.PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), []*wire.MsgTx{tx.MsgTx()}) + + // Check that a transaction that double spends an output in the DAG UTXO set is orphaned. + doubleSpendTx, err := harness.createTx(spendableOuts[0], uint64(txRelayFeeForTest), 2) + if err != nil { + t.Fatalf("unable to create transaction: %v", err) + } + + _, err = harness.txPool.ProcessTransaction(doubleSpendTx, true, 0) + if err != nil { + t.Fatalf("ProcessTransaction: %s", err) + } + testPoolMembership(tc, doubleSpendTx, true, false, false) + + // If you send a transaction some of whose outputs already exist in the DAG UTXO + // set, it won't be added to the orphan pool, and will get rejected + // from the mempool entirely.
+ // This happens because transactions with the same ID as old transactions + // are not allowed as long as some of the old transaction outputs exist + // in the UTXO. + _, err = harness.txPool.ProcessTransaction(tx, true, 0) + var ruleErr RuleError + if ok := errors.As(err, &ruleErr); ok { + var txRuleErr TxRuleError + if ok := errors.As(ruleErr.Err, &txRuleErr); ok { + if txRuleErr.RejectCode != wire.RejectDuplicate { + t.Errorf("ProcessTransaction expected an %v reject code but got %v", wire.RejectDuplicate, txRuleErr.RejectCode) + } + } else { + t.Errorf("ProcessTransaction expected a ruleErr.Err to be a TxRuleError but got %v", err) + } + } else { + t.Errorf("ProcessTransaction expected a RuleError but got %v", err) + } + testPoolMembership(tc, tx, false, false, false) +} + //TestFetchTransaction checks that FetchTransaction //returns only transaction from the main pool and not from the orphan pool func TestFetchTransaction(t *testing.T) { From 291df8bfefcc1d411693839d3ffc53db9fce7ca0 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 13 Apr 2020 15:49:46 +0300 Subject: [PATCH 09/77] [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700) * [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer * [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator * [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested * [NOD-858] Move panic to shouldReplaceSyncPeer --- netsync/manager.go | 46 ++++++++++++++++++++++------------ peer/peer.go | 17 +++++++++++++ server/p2p/on_block_locator.go | 1 + 3 files changed, 48 insertions(+), 16 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index bc1cefb20..d10e4e066 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -157,6 +157,7 @@ type SyncManager struct { msgChan chan interface{} wg sync.WaitGroup quit chan struct{} + syncPeerLock sync.Mutex // These fields should only be accessed from the messageHandler thread rejectedTxns map[daghash.TxID]struct{} @@ -170,6 +171,8 @@ type SyncManager struct { // download/sync the blockDAG from. When syncing is already running, it // simply returns. It also examines the candidates for any which are no longer // candidates and removes them as needed. +// +// This function MUST be called with the sync peer lock held. func (sm *SyncManager) startSync() { // Return now if we're already syncing. if sm.syncPeer != nil { @@ -189,6 +192,7 @@ func (sm *SyncManager) startSync() { // TODO(davec): Use a better algorithm to choose the sync peer. // For now, just pick the first available candidate. syncPeer = peer + break } // Start syncing from the sync peer if one was selected. @@ -294,8 +298,8 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { } // Start syncing by choosing the best candidate if needed. - if isSyncCandidate && sm.syncPeer == nil { - sm.startSync() + if isSyncCandidate { + sm.restartSyncIfNeeded() } } @@ -337,7 +341,7 @@ func (sm *SyncManager) stopSyncFromPeer(peer *peerpkg.Peer) { // sync peer. if sm.syncPeer == peer { sm.syncPeer = nil - sm.startSync() + sm.restartSyncIfNeeded() } } @@ -427,24 +431,34 @@ func (sm *SyncManager) current() bool { // restartSyncIfNeeded finds a new sync candidate if we're not expecting any // blocks from the current one. 
func (sm *SyncManager) restartSyncIfNeeded() { - if sm.syncPeer != nil { - syncPeerState, exists := sm.peerStates[sm.syncPeer] - if exists { - isWaitingForBlocks := func() bool { - syncPeerState.requestQueueMtx.Lock() - defer syncPeerState.requestQueueMtx.Unlock() - return len(syncPeerState.requestedBlocks) != 0 || len(syncPeerState.requestQueues[wire.InvTypeSyncBlock].queue) != 0 - }() - if isWaitingForBlocks { - return - } - } + sm.syncPeerLock.Lock() + defer sm.syncPeerLock.Unlock() + + if !sm.shouldReplaceSyncPeer() { + return } sm.syncPeer = nil sm.startSync() } +func (sm *SyncManager) shouldReplaceSyncPeer() bool { + if sm.syncPeer == nil { + return true + } + + syncPeerState, exists := sm.peerStates[sm.syncPeer] + if !exists { + panic(errors.Errorf("no peer state for sync peer %s", sm.syncPeer)) + } + + syncPeerState.requestQueueMtx.Lock() + defer syncPeerState.requestQueueMtx.Unlock() + return len(syncPeerState.requestedBlocks) == 0 && + len(syncPeerState.requestQueues[wire.InvTypeSyncBlock].queue) == 0 && + !sm.syncPeer.WasBlockLocatorRequested() +} + // handleBlockMsg handles block messages from all peers. func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { peer := bmsg.peer @@ -905,7 +919,7 @@ func (sm *SyncManager) handleSelectedTipMsg(msg *selectedTipMsg) { return } peer.SetSelectedTipHash(selectedTipHash) - sm.startSync() + sm.restartSyncIfNeeded() } // messageHandler is the main handler for the sync manager. It must be run as a diff --git a/peer/peer.go b/peer/peer.go index 6074aa794..a9559d684 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -414,6 +414,8 @@ type Peer struct { prevGetBlockInvsLow *daghash.Hash prevGetBlockInvsHigh *daghash.Hash + wasBlockLocatorRequested bool + // These fields keep track of statistics for the peer and are protected // by the statsMtx mutex. statsMtx sync.RWMutex @@ -435,6 +437,20 @@ type Peer struct { quit chan struct{} } +// WasBlockLocatorRequested returns whether the node +// is expecting to get a block locator from this +// peer. +func (p *Peer) WasBlockLocatorRequested() bool { + return p.wasBlockLocatorRequested +} + +// SetWasBlockLocatorRequested sets whether the node +// is expecting to get a block locator from this +// peer. +func (p *Peer) SetWasBlockLocatorRequested(wasBlockLocatorRequested bool) { + p.wasBlockLocatorRequested = wasBlockLocatorRequested +} + // String returns the peer's address and directionality as a human-readable // string. // @@ -775,6 +791,7 @@ func (p *Peer) PushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetwor // // This function is safe for concurrent access. func (p *Peer) PushGetBlockLocatorMsg(highHash, lowHash *daghash.Hash) { + p.SetWasBlockLocatorRequested(true) msg := wire.NewMsgGetBlockLocator(highHash, lowHash) p.QueueMessage(msg, nil) } diff --git a/server/p2p/on_block_locator.go b/server/p2p/on_block_locator.go index d043ffbb8..6934b16e9 100644 --- a/server/p2p/on_block_locator.go +++ b/server/p2p/on_block_locator.go @@ -8,6 +8,7 @@ import ( // OnBlockLocator is invoked when a peer receives a locator kaspa // message. func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) { + sp.SetWasBlockLocatorRequested(false) // Find the highest known shared block between the peers, and asks for // the block and its future from the peer.
If the block is not // found, create a lower resolution block locator and send it to From 42c53ec3e281fdf05fcd61dec9f9ba7083c331b9 Mon Sep 17 00:00:00 2001 From: Svarog Date: Thu, 16 Apr 2020 15:03:41 +0300 Subject: [PATCH 10/77] [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) --- util/panics/panics.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/panics/panics.go b/util/panics/panics.go index 36532bd27..b32b8fea3 100644 --- a/util/panics/panics.go +++ b/util/panics/panics.go @@ -70,5 +70,7 @@ func exit(log *logs.Logger, reason string, currentThreadStackTrace []byte, gorou fmt.Fprintln(os.Stderr, "Couldn't exit gracefully.") case <-exitHandlerDone: } + fmt.Print("Exiting...") os.Exit(1) + fmt.Print("After os.Exit(1)") } From 5fe9dae55785b31ed701dbb897e05bcc8a3c63e6 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 20 Apr 2020 12:14:55 +0300 Subject: [PATCH 11/77] [NOD-863] Write interface tests for the new database (#697) * [NOD-863] Write TestCursorNext. * [NOD-863] Write TestCursorFirst. * [NOD-863] Fix merge errors. * [NOD-863] Add TestCursorSeek. * [NOD-863] Add TestCursorCloseErrors. * [NOD-863] Add TestCursorCloseFirstAndNext. * [NOD-863] Add TestDataAccessorPut. * [NOD-863] Add TestDataAccessorGet. * [NOD-863] Add TestDataAccessorHas. * [NOD-863] Add TestDatabaseDelete. * [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore. * [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore. * [NOD-863] Add TestTransactionDelete. * [NOD-863] Add TestTransactionHas. * [NOD-863] Add TestTransactionGet. * [NOD-863] Add TestTransactionPut. * [NOD-863] Move cursor tests to the bottom of interface_test.go. * [NOD-863] Move interface_test.go to a database_test package. * [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb. * [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb. * [NOD-863] Split interface_test.go into separate files. * [NOD-863] Rename interface_test.go to common_test.go. * [NOD-863] Extract testForAllDatabaseTypes to a separate function. * [NOD-863] Reorganize how test data gets added to the database. * [NOD-863] Add explanations about testForAllDatabaseTypes. * [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions. * [NOD-863] Extract databasePrepareFunc to a type alias. * [NOD-863] Fix comments. * [NOD-863] Add cursor exhaustion test to testCursorFirst. * [NOD-863] Add cursor Next clause to testCursorSeek. * [NOD-863] Add additional varification to testDatabasePut. * [NOD-863] Add an additional verification into to testTransactionGet. * [NOD-863] Add TestTransactionCommit. * [NOD-863] Add TestTransactionRollback. * [NOD-863] Add TestTransactionRollbackUnlessClosed. * [NOD-863] Remove equals sign from databasePrepareFunc declaration. 
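The bullet list above describes a test matrix: each interface test is written once against the database.Database interface and then run for every registered driver. As a rough sketch of how a second driver would plug into that matrix (prepareAnotherDBForTest is a hypothetical name; the patch below registers only ffldb):

var databasePrepareFuncs = []databasePrepareFunc{
	prepareFFLDBForTest,
	prepareAnotherDBForTest, // hypothetical driver; must return (db, name, teardownFunc) like prepareFFLDBForTest
}

Because each prepare function hands back the opened database, a driver name for failure messages, and a teardown closure, the tests themselves never need driver-specific knowledge.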
--- database/common_test.go | 84 ++++++ database/cursor_test.go | 331 +++++++++++++++++++++ database/database_test.go | 207 +++++++++++++ database/transaction_test.go | 549 +++++++++++++++++++++++++++++++++++ 4 files changed, 1171 insertions(+) create mode 100644 database/common_test.go create mode 100644 database/cursor_test.go create mode 100644 database/database_test.go create mode 100644 database/transaction_test.go diff --git a/database/common_test.go b/database/common_test.go new file mode 100644 index 000000000..9890dd5bc --- /dev/null +++ b/database/common_test.go @@ -0,0 +1,84 @@ +package database_test + +import ( + "fmt" + "github.com/kaspanet/kaspad/database" + "github.com/kaspanet/kaspad/database/ffldb" + "io/ioutil" + "testing" +) + +type databasePrepareFunc func(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) + +// databasePrepareFuncs is a set of functions, in which each function +// prepares a separate database type for testing. +// See testForAllDatabaseTypes for further details. +var databasePrepareFuncs = []databasePrepareFunc{ + prepareFFLDBForTest, +} + +func prepareFFLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) + if err != nil { + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) + } + db, err = ffldb.Open(path) + if err != nil { + t.Fatalf("%s: Open unexpectedly "+ + "failed: %s", testName, err) + } + teardownFunc = func() { + err = db.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + } + return db, "ffldb", teardownFunc +} + +// testForAllDatabaseTypes runs the given testFunc for every database +// type defined in databasePrepareFuncs. This is to make sure that +// all supported database types adhere to the assumptions defined in +// the interfaces in this package. +func testForAllDatabaseTypes(t *testing.T, testName string, + testFunc func(t *testing.T, db database.Database, testName string)) { + + for _, prepareDatabase := range databasePrepareFuncs { + func() { + db, dbType, teardownFunc := prepareDatabase(t, testName) + defer teardownFunc() + + testName := fmt.Sprintf("%s: %s", dbType, testName) + testFunc(t, db, testName) + }() + } +} + +type keyValuePair struct { + key *database.Key + value []byte +} + +func populateDatabaseForTest(t *testing.T, db database.Database, testName string) []keyValuePair { + // Prepare a list of key/value pairs + entries := make([]keyValuePair, 10) + for i := 0; i < 10; i++ { + key := database.MakeBucket().Key([]byte(fmt.Sprintf("key%d", i))) + value := []byte("value") + entries[i] = keyValuePair{key: key, value: value} + } + + // Put the pairs into the database + for _, entry := range entries { + err := db.Put(entry.key, entry.value) + if err != nil { + t.Fatalf("%s: Put unexpectedly "+ + "failed: %s", testName, err) + } + } + + return entries +} diff --git a/database/cursor_test.go b/database/cursor_test.go new file mode 100644 index 000000000..492425984 --- /dev/null +++ b/database/cursor_test.go @@ -0,0 +1,331 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. 
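The header comment above states the file's convention: every exported Test function is a thin wrapper that hands an unexported test body to testForAllDatabaseTypes. A minimal sketch of that shape (testFoo is a made-up example, not one of the tests added in this patch):

func TestFoo(t *testing.T) {
	testForAllDatabaseTypes(t, "TestFoo", testFoo)
}

func testFoo(t *testing.T, db database.Database, testName string) {
	// Assertions against db go here; testName already carries the driver name.
}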
+ +package database_test + +import ( + "bytes" + "github.com/kaspanet/kaspad/database" + "reflect" + "strings" + "testing" +) + +func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor { + cursor, err := db.Cursor(database.MakeBucket()) + if err != nil { + t.Fatalf("%s: Cursor unexpectedly "+ + "failed: %s", testName, err) + } + + return cursor +} + +func TestCursorNext(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorNext", testCursorNext) +} + +func testCursorNext(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Make sure that all the entries exist in the cursor, in their + // correct order + for _, entry := range entries { + hasNext := cursor.Next() + if !hasNext { + t.Fatalf("%s: cursor unexpectedly "+ + "done", testName) + } + cursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(cursorKey, entry.key) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, entry.key, cursorKey) + } + cursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(cursorValue, entry.value) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. Want: %s, got: %s", testName, entry.value, cursorValue) + } + } + + // The cursor should now be exhausted. Make sure Next now + // returns false + hasNext := cursor.Next() + if hasNext { + t.Fatalf("%s: cursor unexpectedly "+ + "not done", testName) + } + + // Rewind the cursor, close it, and call Next on it again. + // This time it should return false because it's closed. + cursor.First() + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + hasNext = cursor.Next() + if hasNext { + t.Fatalf("%s: cursor unexpectedly "+ + "returned true after being closed", testName) + } +} + +func TestCursorFirst(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorFirst", testCursorFirst) +} + +func testCursorFirst(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Make sure that First returns true when the cursor is not empty + exists := cursor.First() + if !exists { + t.Fatalf("%s: Cursor unexpectedly "+ + "returned false", testName) + } + + // Make sure that the first key and value are as expected + firstEntryKey := entries[0].key + firstCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(firstCursorKey, firstEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, firstEntryKey, firstCursorKey) + } + firstEntryValue := entries[0].value + firstCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(firstCursorValue, firstEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. 
Want: %s, got: %s", testName, firstEntryValue, firstCursorValue) + } + + // Exhaust the cursor + for cursor.Next() { + // Do nothing + } + + // Call first again and make sure it still returns true + exists = cursor.First() + if !exists { + t.Fatalf("%s: First unexpectedly "+ + "returned false", testName) + } + + // Call next and make sure it returns true as well + exists = cursor.Next() + if !exists { + t.Fatalf("%s: Next unexpectedly "+ + "returned false", testName) + } + + // Remove all the entries from the database + for _, entry := range entries { + err := db.Delete(entry.key) + if err != nil { + t.Fatalf("%s: Delete unexpectedly "+ + "failed: %s", testName, err) + } + } + + // Create a new cursor over an empty dataset + cursor = prepareCursorForTest(t, db, testName) + + // Make sure that First returns false when the cursor is empty + exists = cursor.First() + if exists { + t.Fatalf("%s: Cursor unexpectedly "+ + "returned true", testName) + } +} + +func TestCursorSeek(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorSeek", testCursorSeek) +} + +func testCursorSeek(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Seek to the fourth entry and make sure it exists + fourthEntry := entries[3] + err := cursor.Seek(fourthEntry.key) + if err != nil { + t.Fatalf("%s: Cursor unexpectedly "+ + "failed: %s", testName, err) + } + + // Make sure that the key and value are as expected + fourthEntryKey := entries[3].key + fourthCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(fourthCursorKey, fourthEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, fourthEntryKey, fourthCursorKey) + } + fourthEntryValue := entries[3].value + fourthCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(fourthCursorValue, fourthEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. Want: %s, got: %s", testName, fourthEntryValue, fourthCursorValue) + } + + // Call Next and make sure that we are now on the fifth entry + exists := cursor.Next() + if !exists { + t.Fatalf("%s: Next unexpectedly "+ + "returned false", testName) + } + fifthEntryKey := entries[4].key + fifthCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(fifthCursorKey, fifthEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, fifthEntryKey, fifthCursorKey) + } + fifthEntryValue := entries[4].value + fifthCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(fifthCursorValue, fifthEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. 
Want: %s, got: %s", testName, fifthEntryValue, fifthCursorValue) + } + + // Seek to a value that doesn't exist and make sure that + // the returned error is ErrNotFound + err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Seek unexpectedly "+ + "succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Seek returned "+ + "wrong error: %s", testName, err) + } +} + +func TestCursorCloseErrors(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorCloseErrors", testCursorCloseErrors) +} + +func testCursorCloseErrors(t *testing.T, db database.Database, testName string) { + populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close "+ + "unexpectedly failed: %s", testName, err) + } + + tests := []struct { + name string + function func() error + }{ + { + name: "Seek", + function: func() error { + return cursor.Seek(database.MakeBucket().Key([]byte{})) + }, + }, + { + name: "Key", + function: func() error { + _, err := cursor.Key() + return err + }, + }, + { + name: "Value", + function: func() error { + _, err := cursor.Value() + return err + }, + }, + { + name: "Close", + function: func() error { + return cursor.Close() + }, + }, + } + + for _, test := range tests { + expectedErrContainsString := "closed cursor" + + // Make sure that the test function returns a "closed cursor" error + err = test.function() + if err == nil { + t.Fatalf("%s: %s "+ + "unexpectedly succeeded", testName, test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("%s: %s "+ + "returned wrong error. Want: %s, got: %s", + testName, test.name, expectedErrContainsString, err) + } + } +} + +func TestCursorCloseFirstAndNext(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorCloseFirstAndNext", testCursorCloseFirstAndNext) +} + +func testCursorCloseFirstAndNext(t *testing.T, db database.Database, testName string) { + populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close "+ + "unexpectedly failed: %s", testName, err) + } + + // We expect First to return false + result := cursor.First() + if result { + t.Fatalf("%s: First "+ + "unexpectedly returned true", testName) + } + + // We expect Next to return false + result = cursor.Next() + if result { + t.Fatalf("%s: Next "+ + "unexpectedly returned true", testName) + } +} diff --git a/database/database_test.go b/database/database_test.go new file mode 100644 index 000000000..310db42b0 --- /dev/null +++ b/database/database_test.go @@ -0,0 +1,207 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. 
+ +package database_test + +import ( + "bytes" + "github.com/kaspanet/kaspad/database" + "testing" +) + +func TestDatabasePut(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabasePut", testDatabasePut) +} + +func testDatabasePut(t *testing.T, db database.Database, testName string) { + // Put value1 into the database + key := database.MakeBucket().Key([]byte("key")) + value1 := []byte("value1") + err := db.Put(key, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value1 + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value1) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value1), string(returnedValue)) + } + + // Put value2 into the database with the same key + value2 := []byte("value2") + err = db.Put(key, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value2 + returnedValue, err = db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value2) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value2), string(returnedValue)) + } +} + +func TestDatabaseGet(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseGet", testDatabaseGet) +} + +func testDatabaseGet(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Get the value back and make sure it's the same one + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value) { + t.Fatalf("%s: Get "+ + "returned wrong value. 
Want: %s, got: %s", + testName, string(value), string(returnedValue)) + } + + // Try getting a non-existent value and make sure + // the returned error is ErrNotFound + _, err = db.Get(database.MakeBucket().Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } +} + +func TestDatabaseHas(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseHas", testDatabaseHas) +} + +func testDatabaseHas(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns true for the value we just put + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value does not exist", testName) + } + + // Make sure that Has returns false for a non-existent value + exists, err = db.Has(database.MakeBucket().Key([]byte("doesn't exist"))) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} + +func TestDatabaseDelete(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseDelete", testDatabaseDelete) +} + +func testDatabaseDelete(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Delete the value + err = db.Delete(key) + if err != nil { + t.Fatalf("%s: Delete "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns false for the deleted value + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} + +func TestDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseAppendToStoreAndRetrieveFromStore", testDatabaseAppendToStoreAndRetrieveFromStore) +} + +func testDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) { + // Append some data into the store + storeName := "store" + data := []byte("data") + location, err := db.AppendToStore(storeName, data) + if err != nil { + t.Fatalf("%s: AppendToStore "+ + "unexpectedly failed: %s", testName, err) + } + + // Retrieve the data and make sure it's equal to what was appended + retrievedData, err := db.RetrieveFromStore(storeName, location) + if err != nil { + t.Fatalf("%s: RetrieveFromStore "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(retrievedData, data) { + t.Fatalf("%s: RetrieveFromStore "+ + "returned unexpected data. 
Want: %s, got: %s", + testName, string(data), string(retrievedData)) + } + + // Make sure that an invalid location returns ErrNotFound + fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} + _, err = db.RetrieveFromStore(storeName, fakeLocation) + if err == nil { + t.Fatalf("%s: RetrieveFromStore "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: RetrieveFromStore "+ + "returned wrong error: %s", testName, err) + } +} diff --git a/database/transaction_test.go b/database/transaction_test.go new file mode 100644 index 000000000..c49f49614 --- /dev/null +++ b/database/transaction_test.go @@ -0,0 +1,549 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. + +package database_test + +import ( + "bytes" + "github.com/kaspanet/kaspad/database" + "strings" + "testing" +) + +func TestTransactionPut(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionPut", testTransactionPut) +} + +func testTransactionPut(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put value1 into the transaction + key := database.MakeBucket().Key([]byte("key")) + value1 := []byte("value1") + err = dbTx.Put(key, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Put value2 into the transaction with the same key + value2 := []byte("value2") + err = dbTx.Put(key, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value2 + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value2) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value2), string(returnedValue)) + } +} + +func TestTransactionGet(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionGet", testTransactionGet) +} + +func testTransactionGet(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key1 := database.MakeBucket().Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Get the value back and make sure it's the same one + returnedValue, err := dbTx.Get(key1) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value1) { + t.Fatalf("%s: Get "+ + "returned wrong value. 
Want: %s, got: %s", + testName, string(value1), string(returnedValue)) + } + + // Try getting a non-existent value and make sure + // the returned error is ErrNotFound + _, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } + + // Put a new value into the database outside of the transaction + key2 := database.MakeBucket().Key([]byte("key2")) + value2 := []byte("value2") + err = db.Put(key2, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value doesn't exist inside the transaction + _, err = dbTx.Get(key2) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } + + // Put a new value into the transaction + key3 := database.MakeBucket().Key([]byte("key3")) + value3 := []byte("value3") + err = dbTx.Put(key3, value3) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value doesn't exist outside the transaction + _, err = db.Get(key3) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } +} + +func TestTransactionHas(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionHas", testTransactionHas) +} + +func testTransactionHas(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key1 := database.MakeBucket().Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Make sure that Has returns true for the value we just put + exists, err := dbTx.Has(key1) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value does not exist", testName) + } + + // Make sure that Has returns false for a non-existent value + exists, err = dbTx.Has(database.MakeBucket().Key([]byte("doesn't exist"))) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } + + // Put a new value into the database outside of the transaction + key2 := database.MakeBucket().Key([]byte("key2")) + value2 := []byte("value2") + err = db.Put(key2, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value doesn't exist inside the transaction + exists, err = dbTx.Has(key2) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} + +func TestTransactionDelete(t *testing.T) { + testForAllDatabaseTypes(t, 
"TestTransactionDelete", testTransactionDelete) +} + +func testTransactionDelete(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin two new transactions + dbTx1, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + dbTx2, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx1.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + err = dbTx2.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Delete the value in the first transaction + err = dbTx1.Delete(key) + if err != nil { + t.Fatalf("%s: Delete "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the first transaction + err = dbTx1.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns false for the deleted value + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } + + // Make sure that the second transaction was no affected + exists, err = dbTx2.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value does not exist", testName) + } +} + +func TestTransactionAppendToStoreAndRetrieveFromStore(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionAppendToStoreAndRetrieveFromStore", testTransactionAppendToStoreAndRetrieveFromStore) +} + +func testTransactionAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Append some data into the store + storeName := "store" + data := []byte("data") + location, err := dbTx.AppendToStore(storeName, data) + if err != nil { + t.Fatalf("%s: AppendToStore "+ + "unexpectedly failed: %s", testName, err) + } + + // Retrieve the data and make sure it's equal to what was appended + retrievedData, err := dbTx.RetrieveFromStore(storeName, location) + if err != nil { + t.Fatalf("%s: RetrieveFromStore "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(retrievedData, data) { + t.Fatalf("%s: RetrieveFromStore "+ + "returned unexpected data. 
Want: %s, got: %s", + testName, string(data), string(retrievedData)) + } + + // Make sure that an invalid location returns ErrNotFound + fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} + _, err = dbTx.RetrieveFromStore(storeName, fakeLocation) + if err == nil { + t.Fatalf("%s: RetrieveFromStore "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: RetrieveFromStore "+ + "returned wrong error: %s", testName, err) + } +} + +func TestTransactionCommit(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionCommit", testTransactionCommit) +} + +func testTransactionCommit(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value exists and is as expected + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value), string(returnedValue)) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. 
Want: %s, got: %s", + testName, expectedError, err) + } +} + +func TestTransactionRollback(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionRollback", testTransactionRollback) +} + +func testTransactionRollback(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Rollback the transaction + err = dbTx.Rollback() + if err != nil { + t.Fatalf("%s: Rollback "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value did not get added to the database + _, err = db.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error", testName) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. Want: %s, got: %s", + testName, expectedError, err) + } +} + +func TestTransactionRollbackUnlessClosed(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionRollbackUnlessClosed", testTransactionRollbackUnlessClosed) +} + +func testTransactionRollbackUnlessClosed(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := database.MakeBucket().Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // RollbackUnlessClosed the transaction + err = dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value did not get added to the database + _, err = db.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error", testName) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. 
Want: %s, got: %s", + testName, expectedError, err) + } + + // Make sure that further calls to RollbackUnlessClosed don't return an error + err = dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } +} From 3af945692e1521b66a18f11b85ca0cfe8547db50 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Thu, 23 Apr 2020 16:55:25 +0300 Subject: [PATCH 12/77] [NOD-922] Panic from cursor Next and First (#703) * [NOD-922] Panic in Cursor First and Next if the cursor is closed. * [NOD-922] Fix broken tests. * [NOD-922] Fix a comment. --- database/cursor.go | 4 +-- database/cursor_test.go | 52 +++++++++++++++++++++++------------- database/ffldb/ldb/cursor.go | 8 +++--- 3 files changed, 39 insertions(+), 25 deletions(-) diff --git a/database/cursor.go b/database/cursor.go index 4f2d1a9e6..91a4bd807 100644 --- a/database/cursor.go +++ b/database/cursor.go @@ -3,11 +3,11 @@ package database // Cursor iterates over database entries given some bucket. type Cursor interface { // Next moves the iterator to the next key/value pair. It returns whether the - // iterator is exhausted. Returns false if the cursor is closed. + // iterator is exhausted. Panics if the cursor is closed. Next() bool // First moves the iterator to the first key/value pair. It returns false if - // such a pair does not exist or if the cursor is closed. + // such a pair does not exist. Panics if the cursor is closed. First() bool // Seek moves the iterator to the first key/value pair whose key is greater diff --git a/database/cursor_test.go b/database/cursor_test.go index 492425984..c89c6fe4f 100644 --- a/database/cursor_test.go +++ b/database/cursor_test.go @@ -7,6 +7,7 @@ package database_test import ( "bytes" + "fmt" "github.com/kaspanet/kaspad/database" "reflect" "strings" @@ -23,6 +24,20 @@ func prepareCursorForTest(t *testing.T, db database.Database, testName string) d return cursor } +func recoverFromClosedCursorPanic(t *testing.T, testName string) { + panicErr := recover() + if panicErr == nil { + t.Fatalf("%s: cursor unexpectedly "+ + "didn't panic after being closed", testName) + } + expectedPanicErr := "closed cursor" + if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) { + t.Fatalf("%s: cursor panicked "+ + "with wrong message. Want: %v, got: %s", + testName, expectedPanicErr, panicErr) + } +} + func TestCursorNext(t *testing.T) { testForAllDatabaseTypes(t, "TestCursorNext", testCursorNext) } @@ -67,19 +82,20 @@ func testCursorNext(t *testing.T, db database.Database, testName string) { "not done", testName) } - // Rewind the cursor, close it, and call Next on it again. - // This time it should return false because it's closed. + // Rewind the cursor and close it cursor.First() err := cursor.Close() if err != nil { t.Fatalf("%s: Close unexpectedly "+ "failed: %s", testName, err) } - hasNext = cursor.Next() - if hasNext { - t.Fatalf("%s: cursor unexpectedly "+ - "returned true after being closed", testName) - } + + // Call Next on the cursor. This time it should panic + // because it's closed. 
+ func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.Next() + }() } func TestCursorFirst(t *testing.T) { @@ -315,17 +331,15 @@ func testCursorCloseFirstAndNext(t *testing.T, db database.Database, testName st "unexpectedly failed: %s", testName, err) } - // We expect First to return false - result := cursor.First() - if result { - t.Fatalf("%s: First "+ - "unexpectedly returned true", testName) - } + // We expect First to panic + func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.First() + }() - // We expect Next to return false - result = cursor.Next() - if result { - t.Fatalf("%s: Next "+ - "unexpectedly returned true", testName) - } + // We expect Next to panic + func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.Next() + }() } diff --git a/database/ffldb/ldb/cursor.go b/database/ffldb/ldb/cursor.go index e2b17ed79..a9d1eb3d2 100644 --- a/database/ffldb/ldb/cursor.go +++ b/database/ffldb/ldb/cursor.go @@ -27,19 +27,19 @@ func (db *LevelDB) Cursor(bucket *database.Bucket) *LevelDBCursor { } // Next moves the iterator to the next key/value pair. It returns whether the -// iterator is exhausted. Returns false if the cursor is closed. +// iterator is exhausted. Panics if the cursor is closed. func (c *LevelDBCursor) Next() bool { if c.isClosed { - return false + panic("cannot call next on a closed cursor") } return c.ldbIterator.Next() } // First moves the iterator to the first key/value pair. It returns false if -// such a pair does not exist or if the cursor is closed. +// such a pair does not exist. Panics if the cursor is closed. func (c *LevelDBCursor) First() bool { if c.isClosed { - return false + panic("cannot call first on a closed cursor") } return c.ldbIterator.First() } From 2910724b490def25d670a88d51a52761398a0962 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Thu, 23 Apr 2020 17:01:09 +0300 Subject: [PATCH 13/77] [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702) * [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect. * [NOD-922] Inline parseNetAddress. * [NOD-922] Fix debug logs. --- connmgr/connmanager.go | 8 ++++ server/p2p/p2p.go | 90 +++++++++++++++++++++++++++++------------- 2 files changed, 70 insertions(+), 28 deletions(-) diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go index 99080213f..aaa245c87 100644 --- a/connmgr/connmanager.go +++ b/connmgr/connmanager.go @@ -151,6 +151,10 @@ type Config struct { // connection is established. OnConnection func(*ConnReq, net.Conn) + // OnConnectionFailed is a callback that is fired when a new outbound + // connection has failed to be established. + OnConnectionFailed func(*ConnReq) + // OnDisconnection is a callback that is fired when an outbound // connection is disconnected. OnDisconnection func(*ConnReq) @@ -419,6 +423,10 @@ out: connReq, msg.err) } cm.handleFailedConn(connReq, msg.err) + + if cm.cfg.OnConnectionFailed != nil { + cm.cfg.OnConnectionFailed(connReq) + } } case <-cm.quit: diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index bc3218ff2..8d85b34c5 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -115,6 +115,10 @@ type outboundPeerConnectedMsg struct { conn net.Conn } +type outboundPeerConnectionFailedMsg struct { + connReq *connmgr.ConnReq +} + // Peer extends the peer to maintain state shared by the server and // the blockmanager. 
type Peer struct { @@ -227,18 +231,19 @@ type Server struct { DAG *blockdag.BlockDAG TxMemPool *mempool.TxPool - modifyRebroadcastInv chan interface{} - newPeers chan *Peer - donePeers chan *Peer - banPeers chan *Peer - newOutboundConnection chan *outboundPeerConnectedMsg - Query chan interface{} - relayInv chan relayMsg - broadcast chan broadcastMsg - wg sync.WaitGroup - nat serverutils.NAT - TimeSource blockdag.TimeSource - services wire.ServiceFlag + modifyRebroadcastInv chan interface{} + newPeers chan *Peer + donePeers chan *Peer + banPeers chan *Peer + newOutboundConnection chan *outboundPeerConnectedMsg + newOutboundConnectionFailed chan *outboundPeerConnectionFailedMsg + Query chan interface{} + relayInv chan relayMsg + broadcast chan broadcastMsg + wg sync.WaitGroup + nat serverutils.NAT + TimeSource blockdag.TimeSource + services wire.ServiceFlag // We add to quitWaitGroup before every instance in which we wait for // the quit channel so that all those instances finish before we shut @@ -1039,6 +1044,26 @@ func (s *Server) outboundPeerConnected(state *peerState, msg *outboundPeerConnec state.outboundGroups[addrmgr.GroupKey(sp.NA())]++ } +// outboundPeerConnected is invoked by the connection manager when a new +// outbound connection failed to be established. +func (s *Server) outboundPeerConnectionFailed(msg *outboundPeerConnectionFailedMsg) { + host, portStr, err := net.SplitHostPort(msg.connReq.Addr.String()) + if err != nil { + srvrLog.Debugf("Cannot extract address host and port %s: %s", msg.connReq.Addr, err) + } + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + srvrLog.Debugf("Cannot parse port %s: %s", msg.connReq.Addr, err) + } + + // defaultServices is used here because Attempt makes no use + // of the services field and NewNetAddressIPPort does not + // take nil for it. + netAddress := wire.NewNetAddressIPPort(net.ParseIP(host), uint16(port), defaultServices) + + s.addrManager.Attempt(netAddress) +} + // peerDoneHandler handles peer disconnects by notifiying the server that it's // done along with other performing other desirable cleanup. 
func (s *Server) peerDoneHandler(sp *Peer) { @@ -1144,6 +1169,9 @@ out: case opcMsg := <-s.newOutboundConnection: s.outboundPeerConnected(state, opcMsg) + + case opcfMsg := <-s.newOutboundConnectionFailed: + s.outboundPeerConnectionFailed(opcfMsg) } } @@ -1517,22 +1545,23 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch maxPeers := config.ActiveConfig().TargetOutboundPeers + config.ActiveConfig().MaxInboundPeers s := Server{ - DAGParams: dagParams, - addrManager: amgr, - newPeers: make(chan *Peer, maxPeers), - donePeers: make(chan *Peer, maxPeers), - banPeers: make(chan *Peer, maxPeers), - Query: make(chan interface{}), - relayInv: make(chan relayMsg, maxPeers), - broadcast: make(chan broadcastMsg, maxPeers), - quit: make(chan struct{}), - modifyRebroadcastInv: make(chan interface{}), - newOutboundConnection: make(chan *outboundPeerConnectedMsg, config.ActiveConfig().TargetOutboundPeers), - nat: nat, - TimeSource: blockdag.NewTimeSource(), - services: services, - SigCache: txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize), - notifyNewTransactions: notifyNewTransactions, + DAGParams: dagParams, + addrManager: amgr, + newPeers: make(chan *Peer, maxPeers), + donePeers: make(chan *Peer, maxPeers), + banPeers: make(chan *Peer, maxPeers), + Query: make(chan interface{}), + relayInv: make(chan relayMsg, maxPeers), + broadcast: make(chan broadcastMsg, maxPeers), + quit: make(chan struct{}), + modifyRebroadcastInv: make(chan interface{}), + newOutboundConnection: make(chan *outboundPeerConnectedMsg, config.ActiveConfig().TargetOutboundPeers), + newOutboundConnectionFailed: make(chan *outboundPeerConnectionFailedMsg, config.ActiveConfig().TargetOutboundPeers), + nat: nat, + TimeSource: blockdag.NewTimeSource(), + services: services, + SigCache: txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize), + notifyNewTransactions: notifyNewTransactions, } // Create indexes if needed. @@ -1657,6 +1686,11 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch conn: conn, } }, + OnConnectionFailed: func(c *connmgr.ConnReq) { + s.newOutboundConnectionFailed <- &outboundPeerConnectionFailedMsg{ + connReq: c, + } + }, GetNewAddress: newAddressFunc, }) if err != nil { From 3c89e1f7b39e3c16966947f3e8ed66e3a97b8f17 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 27 Apr 2020 13:50:09 +0300 Subject: [PATCH 14/77] [NOD-952] Fix nil derefernce bug on outboundPeerConnectionFailed (#704) --- database/cursor.go | 3 +-- server/p2p/p2p.go | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/database/cursor.go b/database/cursor.go index 91a4bd807..0727f47ca 100644 --- a/database/cursor.go +++ b/database/cursor.go @@ -16,8 +16,7 @@ type Cursor interface { Seek(key *Key) error // Key returns the key of the current key/value pair, or ErrNotFound if done. - // Note that the key is trimmed to not include the prefix the cursor was opened - // with. The caller should not modify the contents of the returned slice, and + // The caller should not modify the contents of the returned key, and // its contents may change on the next call to Next. Key() (*Key, error) diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 8d85b34c5..05832eb64 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -1047,6 +1047,12 @@ func (s *Server) outboundPeerConnected(state *peerState, msg *outboundPeerConnec // outboundPeerConnected is invoked by the connection manager when a new // outbound connection failed to be established. 
func (s *Server) outboundPeerConnectionFailed(msg *outboundPeerConnectionFailedMsg) { + // If the connection request has no address + // associated to it, do nothing. + if msg.connReq.Addr == nil { + return + } + host, portStr, err := net.SplitHostPort(msg.connReq.Addr.String()) if err != nil { srvrLog.Debugf("Cannot extract address host and port %s: %s", msg.connReq.Addr, err) From 2ef5c2cbac7b0d8ef20269e4c1c3a656889a589e Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 30 Apr 2020 14:43:38 +0300 Subject: [PATCH 15/77] [NOD-915] Check if lockableFile underlying file is nil before closing it (#709) --- database/ffldb/ff/lockablefile.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/database/ffldb/ff/lockablefile.go b/database/ffldb/ff/lockablefile.go index 9dfe98ded..111dd7c30 100644 --- a/database/ffldb/ff/lockablefile.go +++ b/database/ffldb/ff/lockablefile.go @@ -36,5 +36,9 @@ func (lf *lockableFile) Close() error { lf.Lock() defer lf.Unlock() + if lf.file == nil { + return nil + } + return errors.WithStack(lf.file.Close()) } From 2e2492cc5d5f755ad97019b4a2893229044c2bef Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 3 May 2020 12:19:09 +0300 Subject: [PATCH 16/77] [NOD-849] Database tests (#695) * [NOD-849] Cover ffldb/transaction with tests. * [NOD-849] Cover cursor.go with tests. * [NOD-849] Cover ldb/transaction with tests. * [NOD-849] Cover location.go with tests. * [NOD-849] Write TestFlatFileMultiFileRollback. * [NOD-849] Fix merge errors. * [NOD-849] Fix a comment. * [NOD-849] Fix a comment. * [NOD-849] Add a test that makes sure that files get deleted on rollback. * [NOD-849] Add a test that makes sure that serializeLocation serialized to an expected value. * [NOD-849] Improve TestFlatFileLocationDeserializationErrors. * [NOD-849] Fix a copy+paste error. * [NOD-849] Explain maxFileSize = 16. * [NOD-849] Remove redundant RollbackUnlessClosed call. * [NOD-849] Extract bucket to a variable in TestCursorSanity. * [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods. * [NOD-849] Extract prepareXXX into separate functions. * [NOD-849] Simplify function calls in TestTransactionCloseErrors. * [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function. * [NOD-849] Add a comment over TestCursorSanity. * [NOD-849] Add a comment over function in TestCursorCloseErrors. * [NOD-849] Add a comment over function in TestTransactionCloseErrors. * [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors. * [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors. * [NOD-849] Fix copy+paste error in comments. * [NOD-849] Fix merge errors. * [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors. * [NOD-849] Move prepareDatabaseForTest into ffldb_test.go. * [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue. 
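
As a footnote to the maxFileSize bullets above: the tests below rely on
temporarily overriding the package-level maxFileSize variable and
restoring it with defer. A minimal sketch of that pattern (the
withMaxFileSize helper is illustrative only, not part of this patch):

func withMaxFileSize(size uint32, fn func()) {
	// Save the real limit and restore it even if fn panics, so that
	// other tests in the package still see the original value.
	original := maxFileSize
	maxFileSize = size
	defer func() { maxFileSize = original }()

	fn()
}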
--- database/ffldb/ff/flatfile.go | 6 +- database/ffldb/ff/flatfile_test.go | 117 +++++- database/ffldb/ff/location_test.go | 62 +++ database/ffldb/ffldb.go | 5 +- database/ffldb/ffldb_test.go | 22 ++ database/ffldb/ldb/cursor_test.go | 246 ++++++++++++ database/ffldb/ldb/leveldb_test.go | 54 ++- database/ffldb/ldb/transaction_test.go | 146 ++++++++ database/ffldb/transaction.go | 49 ++- database/ffldb/transaction_test.go | 500 +++++++++++++++++++++++++ 10 files changed, 1161 insertions(+), 46 deletions(-) create mode 100644 database/ffldb/ff/location_test.go create mode 100644 database/ffldb/ldb/cursor_test.go create mode 100644 database/ffldb/ldb/transaction_test.go create mode 100644 database/ffldb/transaction_test.go diff --git a/database/ffldb/ff/flatfile.go b/database/ffldb/ff/flatfile.go index 95070046e..f1c81adf2 100644 --- a/database/ffldb/ff/flatfile.go +++ b/database/ffldb/ff/flatfile.go @@ -16,12 +16,14 @@ const ( // cache. Note that this does not include the current/write file, so there // will typically be one more than this value open. maxOpenFiles = 25 +) +var ( // maxFileSize is the maximum size for each file used to store data. // // NOTE: The current code uses uint32 for all offsets, so this value - // must be less than 2^32 (4 GiB). This is also why it's a typed - // constant. + // must be less than 2^32 (4 GiB). + // NOTE: This is a var rather than a const for testing purposes. maxFileSize uint32 = 512 * 1024 * 1024 // 512 MiB ) diff --git a/database/ffldb/ff/flatfile_test.go b/database/ffldb/ff/flatfile_test.go index 3ac8ada80..eadf8be8a 100644 --- a/database/ffldb/ff/flatfile_test.go +++ b/database/ffldb/ff/flatfile_test.go @@ -1,24 +1,40 @@ package ff import ( + "bytes" + "github.com/kaspanet/kaspad/database" "io/ioutil" + "os" "reflect" "testing" ) -func TestFlatFileStoreSanity(t *testing.T) { - // Open a test store - path, err := ioutil.TempDir("", "TestFlatFileStoreSanity") +func prepareStoreForTest(t *testing.T, testName string) (store *flatFileStore, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) if err != nil { - t.Fatalf("TestFlatFileStoreSanity: TempDir unexpectedly "+ - "failed: %s", err) + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) } name := "test" - store, err := openFlatFileStore(path, name) + store, err = openFlatFileStore(path, name) if err != nil { - t.Fatalf("TestFlatFileStoreSanity: openFlatFileStore "+ - "unexpectedly failed: %s", err) + t.Fatalf("%s: openFlatFileStore "+ + "unexpectedly failed: %s", testName, err) } + teardownFunc = func() { + err = store.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + } + return store, teardownFunc +} + +func TestFlatFileStoreSanity(t *testing.T) { + store, teardownFunc := prepareStoreForTest(t, "TestFlatFileStoreSanity") + defer teardownFunc() // Write something to the store writeData := []byte("Hello world!") @@ -72,3 +88,88 @@ func TestFlatFilePath(t *testing.T) { } } } + +func TestFlatFileMultiFileRollback(t *testing.T) { + store, teardownFunc := prepareStoreForTest(t, "TestFlatFileMultiFileRollback") + defer teardownFunc() + + // Set the maxFileSize to 16 bytes so that we don't have to write + // an enormous amount of data to disk to get multiple files, all + // for the sake of this test. 
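+	// The original limit is saved and restored with defer so that the
+	// lowered value cannot leak into other tests in this package.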
+	currentMaxFileSize := maxFileSize
+	maxFileSize = 16
+	defer func() {
+		maxFileSize = currentMaxFileSize
+	}()
+
+	// Write five 8 byte chunks and keep the last location written to
+	var lastWriteLocation1 *flatFileLocation
+	for i := byte(0); i < 5; i++ {
+		writeData := []byte{i, i, i, i, i, i, i, i}
+		var err error
+		lastWriteLocation1, err = store.write(writeData)
+		if err != nil {
+			t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
+				"unexpected error: %s", err)
+		}
+	}
+
+	// Grab the current location and the current file number
+	currentLocation := store.currentLocation()
+	fileNumberBeforeWriting := store.writeCursor.currentFileNumber
+
+	// Write (2 * maxFileSize) more 8 byte chunks and keep the last location written to
+	var lastWriteLocation2 *flatFileLocation
+	for i := byte(0); i < byte(2*maxFileSize); i++ {
+		writeData := []byte{0, 1, 2, 3, 4, 5, 6, 7}
+		var err error
+		lastWriteLocation2, err = store.write(writeData)
+		if err != nil {
+			t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
+				"unexpected error: %s", err)
+		}
+	}
+
+	// Grab the file number again to later make sure its file no longer exists
+	fileNumberAfterWriting := store.writeCursor.currentFileNumber
+
+	// Rollback
+	err := store.rollback(currentLocation)
+	if err != nil {
+		t.Fatalf("TestFlatFileMultiFileRollback: rollback returned "+
+			"unexpected error: %s", err)
+	}
+
+	// Make sure that lastWriteLocation1 still exists
+	expectedData := []byte{4, 4, 4, 4, 4, 4, 4, 4}
+	data, err := store.read(lastWriteLocation1)
+	if err != nil {
+		t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
+			"unexpected error: %s", err)
+	}
+	if !bytes.Equal(data, expectedData) {
+		t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
+			"unexpected data. Want: %s, got: %s", string(expectedData),
+			string(data))
+	}
+
+	// Make sure that lastWriteLocation2 does NOT exist
+	_, err = store.read(lastWriteLocation2)
+	if err == nil {
+		t.Fatalf("TestFlatFileMultiFileRollback: read " +
+			"unexpectedly succeeded")
+	}
+	if !database.IsNotFoundError(err) {
+		t.Fatalf("TestFlatFileMultiFileRollback: read "+
+			"returned unexpected error: %s", err)
+	}
+
+	// Make sure that all the appropriate files have been deleted
+	for i := fileNumberAfterWriting; i > fileNumberBeforeWriting; i-- {
+		filePath := flatFilePath(store.basePath, store.storeName, i)
+		if _, err := os.Stat(filePath); err == nil || !os.IsNotExist(err) {
+			t.Fatalf("TestFlatFileMultiFileRollback: file "+
+				"unexpectedly still exists: %s", filePath)
+		}
+	}
+}
diff --git a/database/ffldb/ff/location_test.go b/database/ffldb/ff/location_test.go
new file mode 100644
index 000000000..7d15e4407
--- /dev/null
+++ b/database/ffldb/ff/location_test.go
@@ -0,0 +1,62 @@
+package ff
+
+import (
+	"bytes"
+	"encoding/hex"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestFlatFileLocationSerialization(t *testing.T) {
+	location := &flatFileLocation{
+		fileNumber: 1,
+		fileOffset: 2,
+		dataLength: 3,
+	}
+
+	serializedLocation := serializeLocation(location)
+	expectedSerializedLocation := []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0}
+	if !bytes.Equal(serializedLocation, expectedSerializedLocation) {
+		t.Fatalf("TestFlatFileLocationSerialization: serializeLocation "+
+			"returned unexpected bytes. 
Want: %s, got: %s", + hex.EncodeToString(expectedSerializedLocation), hex.EncodeToString(serializedLocation)) + } + + deserializedLocation, err := deserializeLocation(serializedLocation) + if err != nil { + t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+ + "unexpectedly failed: %s", err) + } + if !reflect.DeepEqual(deserializedLocation, location) { + t.Fatalf("TestFlatFileLocationSerialization: original "+ + "location and deserialized location aren't the same. Want: %v, "+ + "got: %v", location, deserializedLocation) + } +} + +func TestFlatFileLocationDeserializationErrors(t *testing.T) { + expectedError := "unexpected serializedLocation length" + + tooShortSerializedLocation := []byte{0, 1, 2, 3, 4, 5} + _, err := deserializeLocation(tooShortSerializedLocation) + if err == nil { + t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation " + + "unexpectedly succeeded") + } + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+ + "returned unexpected error. Want: %s, got: %s", expectedError, err) + } + + tooLongSerializedLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} + _, err = deserializeLocation(tooLongSerializedLocation) + if err == nil { + t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation " + + "unexpectedly succeeded") + } + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("TestFlatFileLocationSerialization: deserializeLocation "+ + "returned unexpected error. Want: %s, got: %s", expectedError, err) + } +} diff --git a/database/ffldb/ffldb.go b/database/ffldb/ffldb.go index 31b4a20f8..7e1c183ca 100644 --- a/database/ffldb/ffldb.go +++ b/database/ffldb/ffldb.go @@ -170,8 +170,9 @@ func (db *ffldb) Begin() (database.Transaction, error) { } transaction := &transaction{ - ldbTx: ldbTx, - ffdb: db.flatFileDB, + ldbTx: ldbTx, + ffdb: db.flatFileDB, + isClosed: false, } return transaction, nil } diff --git a/database/ffldb/ffldb_test.go b/database/ffldb/ffldb_test.go index c58cb4970..a97d27a68 100644 --- a/database/ffldb/ffldb_test.go +++ b/database/ffldb/ffldb_test.go @@ -7,6 +7,28 @@ import ( "testing" ) +func prepareDatabaseForTest(t *testing.T, testName string) (db database.Database, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) + if err != nil { + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) + } + db, err = Open(path) + if err != nil { + t.Fatalf("%s: Open unexpectedly "+ + "failed: %s", testName, err) + } + teardownFunc = func() { + err = db.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + } + return db, teardownFunc +} + func TestRepairFlatFiles(t *testing.T) { // Create a temp db to run tests against path, err := ioutil.TempDir("", "TestRepairFlatFiles") diff --git a/database/ffldb/ldb/cursor_test.go b/database/ffldb/ldb/cursor_test.go new file mode 100644 index 000000000..71d580c8c --- /dev/null +++ b/database/ffldb/ldb/cursor_test.go @@ -0,0 +1,246 @@ +package ldb + +import ( + "bytes" + "fmt" + "github.com/kaspanet/kaspad/database" + "reflect" + "strings" + "testing" +) + +func validateCurrentCursorKeyAndValue(t *testing.T, testName string, cursor *LevelDBCursor, + expectedKey *database.Key, expectedValue []byte) { + + cursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key "+ + "unexpectedly failed: %s", testName, err) + } + if !reflect.DeepEqual(cursorKey, expectedKey) { + t.Fatalf("%s: 
Key "+ + "returned wrong key. Want: %s, got: %s", + testName, string(expectedKey.Bytes()), string(cursorKey.Bytes())) + } + cursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value "+ + "unexpectedly failed for key %s: %s", + testName, cursorKey, err) + } + if !bytes.Equal(cursorValue, expectedValue) { + t.Fatalf("%s: Value "+ + "returned wrong value for key %s. Want: %s, got: %s", + testName, cursorKey, string(expectedValue), string(cursorValue)) + } +} + +func recoverFromClosedCursorPanic(t *testing.T, testName string) { + panicErr := recover() + if panicErr == nil { + t.Fatalf("%s: cursor unexpectedly "+ + "didn't panic after being closed", testName) + } + expectedPanicErr := "closed cursor" + if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) { + t.Fatalf("%s: cursor panicked "+ + "with wrong message. Want: %v, got: %s", + testName, expectedPanicErr, panicErr) + } +} + +// TestCursorSanity validates typical cursor usage, including +// opening a cursor over some existing data, seeking back +// and forth over that data, and getting some keys/values out +// of the cursor. +func TestCursorSanity(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorSanity") + defer teardownFunc() + + // Write some data to the database + bucket := database.MakeBucket([]byte("bucket")) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%d", i) + value := fmt.Sprintf("value%d", i) + err := ldb.Put(bucket.Key([]byte(key)), []byte(value)) + if err != nil { + t.Fatalf("TestCursorSanity: Put "+ + "unexpectedly failed: %s", err) + } + } + + // Open a new cursor + cursor := ldb.Cursor(bucket) + defer func() { + err := cursor.Close() + if err != nil { + t.Fatalf("TestCursorSanity: Close "+ + "unexpectedly failed: %s", err) + } + }() + + // Seek to first key and make sure its key and value are correct + hasNext := cursor.First() + if !hasNext { + t.Fatalf("TestCursorSanity: First " + + "unexpectedly returned non-existance") + } + expectedKey := bucket.Key([]byte("key0")) + expectedValue := []byte("value0") + validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue) + + // Seek to a non-existant key + err := cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("TestCursorSanity: Seek " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Seek "+ + "returned wrong error: %s", err) + } + + // Seek to the last key + err = cursor.Seek(bucket.Key([]byte("key9"))) + if err != nil { + t.Fatalf("TestCursorSanity: Seek "+ + "unexpectedly failed: %s", err) + } + expectedKey = bucket.Key([]byte("key9")) + expectedValue = []byte("value9") + validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue) + + // Call Next to get to the end of the cursor. This should + // return false to signify that there are no items after that. + // Key and Value calls should return ErrNotFound. 
+ hasNext = cursor.Next() + if hasNext { + t.Fatalf("TestCursorSanity: Next " + + "after last value is unexpectedly not done") + } + _, err = cursor.Key() + if err == nil { + t.Fatalf("TestCursorSanity: Key " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Key "+ + "returned wrong error: %s", err) + } + _, err = cursor.Value() + if err == nil { + t.Fatalf("TestCursorSanity: Value " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Value "+ + "returned wrong error: %s", err) + } +} + +func TestCursorCloseErrors(t *testing.T) { + tests := []struct { + name string + + // function is the LevelDBCursor function that we're + // verifying returns an error after the cursor had + // been closed. + function func(dbTx *LevelDBCursor) error + }{ + { + name: "Seek", + function: func(cursor *LevelDBCursor) error { + return cursor.Seek(database.MakeBucket().Key([]byte{})) + }, + }, + { + name: "Key", + function: func(cursor *LevelDBCursor) error { + _, err := cursor.Key() + return err + }, + }, + { + name: "Value", + function: func(cursor *LevelDBCursor) error { + _, err := cursor.Value() + return err + }, + }, + { + name: "Close", + function: func(cursor *LevelDBCursor) error { + return cursor.Close() + }, + }, + } + + for _, test := range tests { + func() { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseErrors") + defer teardownFunc() + + // Open a new cursor + cursor := ldb.Cursor(database.MakeBucket()) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("TestCursorCloseErrors: Close "+ + "unexpectedly failed: %s", err) + } + + expectedErrContainsString := "closed cursor" + + // Make sure that the test function returns a "closed transaction" error + err = test.function(cursor) + if err == nil { + t.Fatalf("TestCursorCloseErrors: %s "+ + "unexpectedly succeeded", test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("TestCursorCloseErrors: %s "+ + "returned wrong error. 
Want: %s, got: %s", + test.name, expectedErrContainsString, err) + } + }() + } +} + +func TestCursorCloseFirstAndNext(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseFirstAndNext") + defer teardownFunc() + + // Write some data to the database + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%d", i) + value := fmt.Sprintf("value%d", i) + err := ldb.Put(database.MakeBucket([]byte("bucket")).Key([]byte(key)), []byte(value)) + if err != nil { + t.Fatalf("TestCursorCloseFirstAndNext: Put "+ + "unexpectedly failed: %s", err) + } + } + + // Open a new cursor + cursor := ldb.Cursor(database.MakeBucket([]byte("bucket"))) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("TestCursorCloseFirstAndNext: Close "+ + "unexpectedly failed: %s", err) + } + + // We expect First to panic + func() { + defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext") + cursor.First() + }() + + // We expect Next to panic + func() { + defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext") + cursor.Next() + }() +} diff --git a/database/ffldb/ldb/leveldb_test.go b/database/ffldb/ldb/leveldb_test.go index 5e44d4b1b..efa00e186 100644 --- a/database/ffldb/ldb/leveldb_test.go +++ b/database/ffldb/ldb/leveldb_test.go @@ -7,30 +7,36 @@ import ( "testing" ) -func TestLevelDBSanity(t *testing.T) { - // Open a test db - path, err := ioutil.TempDir("", "TestLevelDBSanity") +func prepareDatabaseForTest(t *testing.T, testName string) (ldb *LevelDB, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) if err != nil { - t.Fatalf("TestLevelDBSanity: TempDir unexpectedly "+ - "failed: %s", err) + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) } - ldb, err := NewLevelDB(path) + ldb, err = NewLevelDB(path) if err != nil { - t.Fatalf("TestLevelDBSanity: NewLevelDB "+ - "unexpectedly failed: %s", err) + t.Fatalf("%s: NewLevelDB unexpectedly "+ + "failed: %s", testName, err) } - defer func() { - err := ldb.Close() + teardownFunc = func() { + err = ldb.Close() if err != nil { - t.Fatalf("TestLevelDBSanity: Close "+ - "unexpectedly failed: %s", err) + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) } - }() + } + return ldb, teardownFunc +} + +func TestLevelDBSanity(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBSanity") + defer teardownFunc() // Put something into the db key := database.MakeBucket().Key([]byte("key")) putData := []byte("Hello world!") - err = ldb.Put(key, putData) + err := ldb.Put(key, putData) if err != nil { t.Fatalf("TestLevelDBSanity: Put returned "+ "unexpected error: %s", err) @@ -52,24 +58,8 @@ func TestLevelDBSanity(t *testing.T) { } func TestLevelDBTransactionSanity(t *testing.T) { - // Open a test db - path, err := ioutil.TempDir("", "TestLevelDBTransactionSanity") - if err != nil { - t.Fatalf("TestLevelDBTransactionSanity: TempDir unexpectedly "+ - "failed: %s", err) - } - ldb, err := NewLevelDB(path) - if err != nil { - t.Fatalf("TestLevelDBTransactionSanity: NewLevelDB "+ - "unexpectedly failed: %s", err) - } - defer func() { - err := ldb.Close() - if err != nil { - t.Fatalf("TestLevelDBTransactionSanity: Close "+ - "unexpectedly failed: %s", err) - } - }() + ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBTransactionSanity") + defer teardownFunc() // Case 1. 
Write in tx and then read directly from the DB // Begin a new transaction diff --git a/database/ffldb/ldb/transaction_test.go b/database/ffldb/ldb/transaction_test.go new file mode 100644 index 000000000..c7dc8416b --- /dev/null +++ b/database/ffldb/ldb/transaction_test.go @@ -0,0 +1,146 @@ +package ldb + +import ( + "github.com/kaspanet/kaspad/database" + "strings" + "testing" +) + +func TestTransactionCloseErrors(t *testing.T) { + tests := []struct { + name string + + // function is the LevelDBTransaction function that + // we're verifying whether it returns an error after + // the transaction had been closed. + function func(dbTx *LevelDBTransaction) error + shouldReturnError bool + }{ + { + name: "Put", + function: func(dbTx *LevelDBTransaction) error { + return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value")) + }, + shouldReturnError: true, + }, + { + name: "Get", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Get(database.MakeBucket().Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Has", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Has(database.MakeBucket().Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Delete", + function: func(dbTx *LevelDBTransaction) error { + return dbTx.Delete(database.MakeBucket().Key([]byte("key"))) + }, + shouldReturnError: true, + }, + { + name: "Cursor", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Cursor(database.MakeBucket([]byte("bucket"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Rollback", + function: (*LevelDBTransaction).Rollback, + shouldReturnError: true, + }, + { + name: "Commit", + function: (*LevelDBTransaction).Commit, + shouldReturnError: true, + }, + { + name: "RollbackUnlessClosed", + function: (*LevelDBTransaction).RollbackUnlessClosed, + shouldReturnError: false, + }, + } + + for _, test := range tests { + func() { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCloseErrors") + defer teardownFunc() + + // Begin a new transaction to test Commit + commitTx, err := ldb.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := commitTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Commit the Commit test transaction + err = commitTx.Commit() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Commit "+ + "unexpectedly failed: %s", err) + } + + // Begin a new transaction to test Rollback + rollbackTx, err := ldb.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := rollbackTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Rollback the Rollback test transaction + err = rollbackTx.Rollback() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Rollback "+ + "unexpectedly failed: %s", err) + } + + expectedErrContainsString := "closed transaction" + + // Make sure that the test function returns a "closed transaction" error + // for both the commitTx and the rollbackTx + for _, closedTx := range []*LevelDBTransaction{commitTx, rollbackTx} { + err = test.function(closedTx) + if test.shouldReturnError { + if err == nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + 
"unexpectedly succeeded", test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "returned wrong error. Want: %s, got: %s", + test.name, expectedErrContainsString, err) + } + } else { + if err != nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "unexpectedly failed: %s", test.name, err) + } + } + } + }() + } +} diff --git a/database/ffldb/transaction.go b/database/ffldb/transaction.go index d30ab7b2f..c7652c0ca 100644 --- a/database/ffldb/transaction.go +++ b/database/ffldb/transaction.go @@ -4,6 +4,7 @@ import ( "github.com/kaspanet/kaspad/database" "github.com/kaspanet/kaspad/database/ffldb/ff" "github.com/kaspanet/kaspad/database/ffldb/ldb" + "github.com/pkg/errors" ) // transaction is an ffldb transaction. @@ -13,14 +14,19 @@ import ( // NO guarantee that if one puts data into the transaction then // it will be available to get within the same transaction. type transaction struct { - ldbTx *ldb.LevelDBTransaction - ffdb *ff.FlatFileDB + ldbTx *ldb.LevelDBTransaction + ffdb *ff.FlatFileDB + isClosed bool } // Put sets the value for the given key. It overwrites // any previous value for that key. // This method is part of the DataAccessor interface. func (tx *transaction) Put(key *database.Key, value []byte) error { + if tx.isClosed { + return errors.New("cannot put into a closed transaction") + } + return tx.ldbTx.Put(key, value) } @@ -28,6 +34,10 @@ func (tx *transaction) Put(key *database.Key, value []byte) error { // ErrNotFound if the given key does not exist. // This method is part of the DataAccessor interface. func (tx *transaction) Get(key *database.Key) ([]byte, error) { + if tx.isClosed { + return nil, errors.New("cannot get from a closed transaction") + } + return tx.ldbTx.Get(key) } @@ -35,6 +45,10 @@ func (tx *transaction) Get(key *database.Key) ([]byte, error) { // given key. // This method is part of the DataAccessor interface. func (tx *transaction) Has(key *database.Key) (bool, error) { + if tx.isClosed { + return false, errors.New("cannot has from a closed transaction") + } + return tx.ldbTx.Has(key) } @@ -42,6 +56,10 @@ func (tx *transaction) Has(key *database.Key) (bool, error) { // return an error if the key doesn't exist. // This method is part of the DataAccessor interface. func (tx *transaction) Delete(key *database.Key) error { + if tx.isClosed { + return errors.New("cannot delete from a closed transaction") + } + return tx.ldbTx.Delete(key) } @@ -52,6 +70,10 @@ func (tx *transaction) Delete(key *database.Key) error { // that has just now been inserted. // This method is part of the DataAccessor interface. func (tx *transaction) AppendToStore(storeName string, data []byte) ([]byte, error) { + if tx.isClosed { + return nil, errors.New("cannot append to store on a closed transaction") + } + return appendToStore(tx, tx.ffdb, storeName, data) } @@ -61,12 +83,20 @@ func (tx *transaction) AppendToStore(storeName string, data []byte) ([]byte, err // AppendToStore for further details. // This method is part of the DataAccessor interface. func (tx *transaction) RetrieveFromStore(storeName string, location []byte) ([]byte, error) { + if tx.isClosed { + return nil, errors.New("cannot retrieve from store on a closed transaction") + } + return tx.ffdb.Read(storeName, location) } // Cursor begins a new cursor over the given bucket. // This method is part of the DataAccessor interface. 
func (tx *transaction) Cursor(bucket *database.Bucket) (database.Cursor, error) { + if tx.isClosed { + return nil, errors.New("cannot open a cursor from a closed transaction") + } + return tx.ldbTx.Cursor(bucket) } @@ -74,6 +104,11 @@ func (tx *transaction) Cursor(bucket *database.Bucket) (database.Cursor, error) // database within this transaction. // This method is part of the Transaction interface. func (tx *transaction) Rollback() error { + if tx.isClosed { + return errors.New("cannot rollback a closed transaction") + } + tx.isClosed = true + return tx.ldbTx.Rollback() } @@ -81,6 +116,11 @@ func (tx *transaction) Rollback() error { // within this transaction. // This method is part of the Transaction interface. func (tx *transaction) Commit() error { + if tx.isClosed { + return errors.New("cannot commit a closed transaction") + } + tx.isClosed = true + return tx.ldbTx.Commit() } @@ -88,5 +128,10 @@ func (tx *transaction) Commit() error { // the database within the transaction, unless the transaction // had already been closed using either Rollback or Commit. func (tx *transaction) RollbackUnlessClosed() error { + if tx.isClosed { + return nil + } + tx.isClosed = true + return tx.ldbTx.RollbackUnlessClosed() } diff --git a/database/ffldb/transaction_test.go b/database/ffldb/transaction_test.go new file mode 100644 index 000000000..c76844858 --- /dev/null +++ b/database/ffldb/transaction_test.go @@ -0,0 +1,500 @@ +package ffldb + +import ( + "bytes" + "github.com/kaspanet/kaspad/database" + "strings" + "testing" +) + +func TestTransactionCommitForLevelDBMethods(t *testing.T) { + db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCommitForLevelDBMethods") + defer teardownFunc() + + // Put a value into the database + key1 := database.MakeBucket().Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Put "+ + "unexpectedly failed: %s", err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Make sure that Has returns that the original value exists + exists, err := dbTx.Has(key1) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if !exists { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " + + "unexpectedly returned that the value does not exist") + } + + // Get the existing value and make sure it's equal to the original + existingValue, err := dbTx.Get(key1) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(existingValue, value1) { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "returned unexpected value. 
Want: %s, got: %s", + string(value1), string(existingValue)) + } + + // Delete the existing value + err = dbTx.Delete(key1) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Delete "+ + "unexpectedly failed: %s", err) + } + + // Try to get a value that does not exist and make sure it returns ErrNotFound + _, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "returned unexpected error: %s", err) + } + + // Put a new value + key2 := database.MakeBucket().Key([]byte("key2")) + value2 := []byte("value2") + err = dbTx.Put(key2, value2) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Put "+ + "unexpectedly failed: %s", err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Commit "+ + "unexpectedly failed: %s", err) + } + + // Make sure that Has returns that the original value does NOT exist + exists, err = db.Has(key1) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if exists { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " + + "unexpectedly returned that the value exists") + } + + // Try to Get the existing value and make sure an ErrNotFound is returned + _, err = db.Get(key1) + if err == nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "returned unexpected err: %s", err) + } + + // Make sure that Has returns that the new value exists + exists, err = db.Has(key2) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if !exists { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Has " + + "unexpectedly returned that the value does not exist") + } + + // Get the new value and make sure it's equal to the original + existingValue, err = db.Get(key2) + if err != nil { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(existingValue, value2) { + t.Fatalf("TestTransactionCommitForLevelDBMethods: Get "+ + "returned unexpected value. 
Want: %s, got: %s", + string(value2), string(existingValue)) + } +} + +func TestTransactionRollbackForLevelDBMethods(t *testing.T) { + db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionRollbackForLevelDBMethods") + defer teardownFunc() + + // Put a value into the database + key1 := database.MakeBucket().Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Put "+ + "unexpectedly failed: %s", err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Make sure that Has returns that the original value exists + exists, err := dbTx.Has(key1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if !exists { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " + + "unexpectedly returned that the value does not exist") + } + + // Get the existing value and make sure it's equal to the original + existingValue, err := dbTx.Get(key1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(existingValue, value1) { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+ + "returned unexpected value. Want: %s, got: %s", + string(value1), string(existingValue)) + } + + // Delete the existing value + err = dbTx.Delete(key1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Delete "+ + "unexpectedly failed: %s", err) + } + + // Put a new value + key2 := database.MakeBucket().Key([]byte("key2")) + value2 := []byte("value2") + err = dbTx.Put(key2, value2) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Put "+ + "unexpectedly failed: %s", err) + } + + // Rollback the transaction + err = dbTx.Rollback() + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Rollback "+ + "unexpectedly failed: %s", err) + } + + // Make sure that Has returns that the original value still exists + exists, err = db.Has(key1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if !exists { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " + + "unexpectedly returned that the value does not exist") + } + + // Get the existing value and make sure it is still returned + existingValue, err = db.Get(key1) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(existingValue, value1) { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+ + "returned unexpected value. 
Want: %s, got: %s", + string(value1), string(existingValue)) + } + + // Make sure that Has returns that the new value does NOT exist + exists, err = db.Has(key2) + if err != nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has "+ + "unexpectedly failed: %s", err) + } + if exists { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Has " + + "unexpectedly returned that the value exists") + } + + // Try to Get the new value and make sure it returns an ErrNotFound + _, err = db.Get(key2) + if err == nil { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestTransactionRollbackForLevelDBMethods: Get "+ + "returned unexpected error: %s", err) + } +} + +func TestTransactionCloseErrors(t *testing.T) { + tests := []struct { + name string + function func(dbTx database.Transaction) error + shouldReturnError bool + }{ + { + name: "Put", + function: func(dbTx database.Transaction) error { + return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value")) + }, + shouldReturnError: true, + }, + { + name: "Get", + function: func(dbTx database.Transaction) error { + _, err := dbTx.Get(database.MakeBucket().Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Has", + function: func(dbTx database.Transaction) error { + _, err := dbTx.Has(database.MakeBucket().Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Delete", + function: func(dbTx database.Transaction) error { + return dbTx.Delete(database.MakeBucket().Key([]byte("key"))) + }, + shouldReturnError: true, + }, + { + name: "Cursor", + function: func(dbTx database.Transaction) error { + _, err := dbTx.Cursor(database.MakeBucket([]byte("bucket"))) + return err + }, + shouldReturnError: true, + }, + { + name: "AppendToStore", + function: func(dbTx database.Transaction) error { + _, err := dbTx.AppendToStore("store", []byte("data")) + return err + }, + shouldReturnError: true, + }, + { + name: "RetrieveFromStore", + function: func(dbTx database.Transaction) error { + _, err := dbTx.RetrieveFromStore("store", []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + return err + }, + shouldReturnError: true, + }, + { + name: "Rollback", + function: func(dbTx database.Transaction) error { + return dbTx.Rollback() + }, + shouldReturnError: true, + }, + { + name: "Commit", + function: func(dbTx database.Transaction) error { + return dbTx.Commit() + }, + shouldReturnError: true, + }, + { + name: "RollbackUnlessClosed", + function: func(dbTx database.Transaction) error { + return dbTx.RollbackUnlessClosed() + }, + shouldReturnError: false, + }, + } + + for _, test := range tests { + func() { + db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCloseErrors") + defer teardownFunc() + + // Begin a new transaction to test Commit + commitTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := commitTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Commit the Commit test transaction + err = commitTx.Commit() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Commit "+ + "unexpectedly failed: %s", err) + } + + // Begin a new transaction to test Rollback + rollbackTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", 
err) + } + defer func() { + err := rollbackTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Rollback the Rollback test transaction + err = rollbackTx.Rollback() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Rollback "+ + "unexpectedly failed: %s", err) + } + + expectedErrContainsString := "closed transaction" + + // Make sure that the test function returns a "closed transaction" error + // for both the commitTx and the rollbackTx + for _, closedTx := range []database.Transaction{commitTx, rollbackTx} { + err = test.function(closedTx) + if test.shouldReturnError { + if err == nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "unexpectedly succeeded", test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "returned wrong error. Want: %s, got: %s", + test.name, expectedErrContainsString, err) + } + } else { + if err != nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "unexpectedly failed: %s", test.name, err) + } + } + } + }() + } +} + +func TestTransactionRollbackUnlessClosed(t *testing.T) { + db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionRollbackUnlessClosed") + defer teardownFunc() + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionRollbackUnlessClosed: Begin "+ + "unexpectedly failed: %s", err) + } + + // Roll it back + err = dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionRollbackUnlessClosed: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } +} + +func TestTransactionCommitForFlatFileMethods(t *testing.T) { + db, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCommitForFlatFileMethods") + defer teardownFunc() + + // Put a value into the database + store := "store" + value1 := []byte("value1") + location1, err := db.AppendToStore(store, value1) + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: AppendToStore "+ + "unexpectedly failed: %s", err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Retrieve the existing value and make sure it's equal to the original + existingValue, err := dbTx.RetrieveFromStore(store, location1) + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(existingValue, value1) { + t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+ + "returned unexpected value. 
Want: %s, got: %s", + string(value1), string(existingValue)) + } + + // Put a new value + value2 := []byte("value2") + location2, err := dbTx.AppendToStore(store, value2) + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: AppendToStore "+ + "unexpectedly failed: %s", err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: Commit "+ + "unexpectedly failed: %s", err) + } + + // Retrieve the new value and make sure it's equal to the original + newValue, err := db.RetrieveFromStore(store, location2) + if err != nil { + t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+ + "unexpectedly failed: %s", err) + } + if !bytes.Equal(newValue, value2) { + t.Fatalf("TestTransactionCommitForFlatFileMethods: RetrieveFromStore "+ + "returned unexpected value. Want: %s, got: %s", + string(value2), string(newValue)) + } +} From 5b74e51db1358c709e5df2d5893e90f4a9aeefe4 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 3 May 2020 14:56:47 +0300 Subject: [PATCH 17/77] [NOD-956] Increase K to 15. (#710) --- dagconfig/params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dagconfig/params.go b/dagconfig/params.go index 46a8c7fe8..bdcbebf70 100644 --- a/dagconfig/params.go +++ b/dagconfig/params.go @@ -48,7 +48,7 @@ var ( ) const ( - ghostdagK = 10 + ghostdagK = 15 difficultyAdjustmentWindowSize = 2640 timestampDeviationTolerance = 132 finalityDuration = 24 * time.Hour From 73ad0adf72e67cc6d0bb8a1e2910e901b51dae21 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 4 May 2020 09:09:23 +0300 Subject: [PATCH 18/77] [NOD-913] Use sync rate in getBlockTemplate (#705) * [NOD-913] Use sync rate in getBlockTemplate * [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge * [NOD-913] Pass maxDeviation as an argument * [NOD-913] Change maxDeviation to +5% * [NOD-913] Rename variables * [NOD-913] Rename variables and functions and change comments * [NOD-913] Split addBlockProcessingTimestamp --- blockdag/dag.go | 4 ++ blockdag/process.go | 2 + blockdag/sync_rate.go | 57 +++++++++++++++++++++++++ server/p2p/p2p.go | 26 ----------- server/rpc/handle_get_block_template.go | 22 ++++++++-- server/rpc/rpcserver.go | 23 +++++----- 6 files changed, 92 insertions(+), 42 deletions(-) create mode 100644 blockdag/sync_rate.go diff --git a/blockdag/dag.go b/blockdag/dag.go index ee117f3b7..ad1bb4182 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -153,6 +153,9 @@ type BlockDAG struct { utxoDiffStore *utxoDiffStore reachabilityStore *reachabilityStore multisetStore *multisetStore + + recentBlockProcessingTimestamps []time.Time + startTime time.Time } // IsKnownBlock returns whether or not the DAG instance has the block represented @@ -2092,6 +2095,7 @@ func New(config *Config) (*BlockDAG, error) { deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments), blockCount: 0, subnetworkID: config.SubnetworkID, + startTime: time.Now(), } dag.virtual = newVirtualBlock(dag, nil) diff --git a/blockdag/process.go b/blockdag/process.go index 8af27846b..437b6f6c6 100644 --- a/blockdag/process.go +++ b/blockdag/process.go @@ -253,6 +253,8 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) } } + dag.addBlockProcessingTimestamp() + log.Debugf("Accepted block %s", blockHash) return false, false, nil diff --git a/blockdag/sync_rate.go b/blockdag/sync_rate.go new file mode 100644 index 
000000000..0ab955621 --- /dev/null +++ b/blockdag/sync_rate.go @@ -0,0 +1,57 @@ +package blockdag + +import "time" + +const syncRateWindowDuration = 15 * time.Minute + +// addBlockProcessingTimestamp adds the last block processing timestamp in order to measure the recent sync rate. +// +// This function MUST be called with the DAG state lock held (for writes). +func (dag *BlockDAG) addBlockProcessingTimestamp() { + now := time.Now() + dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now) + dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() +} + +// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration +// from dag.recentBlockProcessingTimestamps +// +// This function MUST be called with the DAG state lock held (for writes). +func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() { + dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow() +} + +func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []time.Time { + minTime := time.Now().Add(-syncRateWindowDuration) + windowStartIndex := len(dag.recentBlockProcessingTimestamps) + for i, processTime := range dag.recentBlockProcessingTimestamps { + if processTime.After(minTime) { + windowStartIndex = i + break + } + } + return dag.recentBlockProcessingTimestamps[windowStartIndex:] +} + +// syncRate returns the rate of processed +// blocks in the last syncRateWindowDuration +// duration. +func (dag *BlockDAG) syncRate() float64 { + dag.RLock() + defer dag.RUnlock() + return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds() +} + +// IsSyncRateBelowThreshold checks whether the sync rate +// is below the expected threshold. +func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool { + if dag.uptime() < syncRateWindowDuration { + return false + } + + return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation +} + +func (dag *BlockDAG) uptime() time.Duration { + return time.Now().Sub(dag.startTime) +} diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 05832eb64..8a41757b8 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -780,10 +780,6 @@ type getConnCountMsg struct { reply chan int32 } -type getShouldMineOnGenesisMsg struct { - reply chan bool -} - //GetPeersMsg is the message type which is used by the rpc server to get the peers list from the p2p server type GetPeersMsg struct { Reply chan []*Peer @@ -832,17 +828,6 @@ func (s *Server) handleQuery(state *peerState, querymsg interface{}) { }) msg.reply <- nconnected - case getShouldMineOnGenesisMsg: - shouldMineOnGenesis := true - if state.Count() != 0 { - shouldMineOnGenesis = state.forAllPeers(func(sp *Peer) bool { - return sp.SelectedTipHash().IsEqual(s.DAGParams.GenesisHash) - }) - } else { - shouldMineOnGenesis = false - } - msg.reply <- shouldMineOnGenesis - case GetPeersMsg: peers := make([]*Peer, 0, state.Count()) state.forAllPeers(func(sp *Peer) bool { @@ -1241,17 +1226,6 @@ func (s *Server) ConnectedCount() int32 { return <-replyChan } -// ShouldMineOnGenesis checks if the node is connected to at least one -// peer, and at least one of its peers knows of any blocks that were mined -// on top of the genesis block. 
-func (s *Server) ShouldMineOnGenesis() bool { - replyChan := make(chan bool) - - s.Query <- getShouldMineOnGenesisMsg{reply: replyChan} - - return <-replyChan -} - // OutboundGroupCount returns the number of peers connected to the given // outbound group key. func (s *Server) OutboundGroupCount(key string) int { diff --git a/server/rpc/handle_get_block_template.go b/server/rpc/handle_get_block_template.go index 7248f0fcb..7e9a7da3a 100644 --- a/server/rpc/handle_get_block_template.go +++ b/server/rpc/handle_get_block_template.go @@ -109,9 +109,7 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ // the DAG is synced. Note that we make a special check for when // we have nothing besides the genesis block (blueScore == 0), // because in that state IsCurrent may still return true. - currentBlueScore := s.cfg.DAG.SelectedTipBlueScore() - if (currentBlueScore != 0 && !s.cfg.DAG.IsCurrent()) || - (currentBlueScore == 0 && !s.cfg.shouldMineOnGenesis()) { + if !isSyncedForMining(s) { return nil, &rpcmodel.RPCError{ Code: rpcmodel.ErrRPCClientInInitialDownload, Message: "Kaspa is downloading blocks...", @@ -131,6 +129,24 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ } } +// isSyncedForMining checks if the node is synced enough for mining blocks +// on top of its world view. +// To do that, first it checks if the selected tip timestamp is not older than maxTipAge. If that's the case, it means +// the node is synced since blocks' timestamps are not allowed to deviate too much into the future. +// If that's not the case it checks the rate it added new blocks to the DAG recently. If it's faster than +// blockRate * maxSyncRateDeviation it means the node is not synced, since when the node is synced it shouldn't add +// blocks to the DAG faster than the block rate. +func isSyncedForMining(s *Server) bool { + const maxTipAge = 5 * time.Minute + isCloseToCurrentTime := s.cfg.DAG.Now().Sub(s.cfg.DAG.SelectedTipHeader().Timestamp) <= maxTipAge + if isCloseToCurrentTime { + return true + } + + const maxSyncRateDeviation = 1.05 + return s.cfg.DAG.IsSyncRateBelowThreshold(maxSyncRateDeviation) +} + // handleGetBlockTemplateRequest is a helper for handleGetBlockTemplate which // deals with generating and returning block templates to the caller. It // handles both long poll requests as specified by BIP 0022 as well as regular diff --git a/server/rpc/rpcserver.go b/server/rpc/rpcserver.go index da7c14705..90cb640a1 100644 --- a/server/rpc/rpcserver.go +++ b/server/rpc/rpcserver.go @@ -783,8 +783,6 @@ type rpcserverConfig struct { // These fields define any optional indexes the RPC server can make use // of to provide additional data when queried. 
AcceptanceIndex *indexers.AcceptanceIndex - - shouldMineOnGenesis func() bool } // setupRPCListeners returns a slice of listeners that are configured for use @@ -853,17 +851,16 @@ func NewRPCServer( return nil, errors.New("RPCS: No valid listen address") } cfg := &rpcserverConfig{ - Listeners: rpcListeners, - StartupTime: startupTime, - ConnMgr: &rpcConnManager{p2pServer}, - SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager}, - TimeSource: p2pServer.TimeSource, - DAGParams: p2pServer.DAGParams, - TxMemPool: p2pServer.TxMemPool, - Generator: blockTemplateGenerator, - AcceptanceIndex: p2pServer.AcceptanceIndex, - DAG: p2pServer.DAG, - shouldMineOnGenesis: p2pServer.ShouldMineOnGenesis, + Listeners: rpcListeners, + StartupTime: startupTime, + ConnMgr: &rpcConnManager{p2pServer}, + SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager}, + TimeSource: p2pServer.TimeSource, + DAGParams: p2pServer.DAGParams, + TxMemPool: p2pServer.TxMemPool, + Generator: blockTemplateGenerator, + AcceptanceIndex: p2pServer.AcceptanceIndex, + DAG: p2pServer.DAG, } rpc := Server{ cfg: *cfg, From e70a615135918cea6b774f6cf753c40fbd93ff75 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 4 May 2020 13:07:40 +0300 Subject: [PATCH 19/77] [NOD-872] Defer all currently undeferred unlocks in the database package (#706) * [NOD-872] Defer unlocks in write.go. * [NOD-872] Defer unlocks in rollback.go. * [NOD-872] Defer unlocks in read.go. * [NOD-872] Fix duplicate RUnlock. * [NOD-872] Remove a redundant empty line. * [NOD-872] Extract closeCurrentWriteCursorFile to a separate method. --- database/ffldb/ff/read.go | 32 +++++++++----------------------- database/ffldb/ff/rollback.go | 11 ++--------- database/ffldb/ff/write.go | 32 +++++++++++++++++++++----------- 3 files changed, 32 insertions(+), 43 deletions(-) diff --git a/database/ffldb/ff/read.go b/database/ffldb/ff/read.go index 5c7d1b6bc..e8ce811e9 100644 --- a/database/ffldb/ff/read.go +++ b/database/ffldb/ff/read.go @@ -33,10 +33,11 @@ func (s *flatFileStore) read(location *flatFileLocation) ([]byte, error) { if err != nil { return nil, err } + flatFile.RLock() + defer flatFile.RUnlock() data := make([]byte, location.dataLength) n, err := flatFile.file.ReadAt(data, int64(location.fileOffset)) - flatFile.RUnlock() if err != nil { return nil, errors.Wrapf(err, "failed to read data in store '%s' "+ "from file %d, offset %d", s.storeName, location.fileNumber, @@ -62,43 +63,31 @@ func (s *flatFileStore) read(location *flatFileLocation) ([]byte, error) { // will also open the file when it's not already open subject to the rules // described in openFile. Also handles closing files as needed to avoid going // over the max allowed open files. -// -// NOTE: The returned flat file will already have the read lock acquired and -// the caller MUST call .RUnlock() to release it once it has finished all read -// operations. This is necessary because otherwise it would be possible for a -// separate goroutine to close the file after it is returned from here, but -// before the caller has acquired a read lock. func (s *flatFileStore) flatFile(fileNumber uint32) (*lockableFile, error) { // When the requested flat file is open for writes, return it. 
s.writeCursor.RLock() + defer s.writeCursor.RUnlock() if fileNumber == s.writeCursor.currentFileNumber && s.writeCursor.currentFile.file != nil { openFile := s.writeCursor.currentFile - openFile.RLock() - s.writeCursor.RUnlock() return openFile, nil } - s.writeCursor.RUnlock() // Try to return an open file under the overall files read lock. s.openFilesMutex.RLock() + defer s.openFilesMutex.RUnlock() if openFile, ok := s.openFiles[fileNumber]; ok { s.lruMutex.Lock() - s.openFilesLRU.MoveToFront(s.fileNumberToLRUElement[fileNumber]) - s.lruMutex.Unlock() + defer s.lruMutex.Unlock() + + s.openFilesLRU.MoveToFront(s.fileNumberToLRUElement[fileNumber]) - openFile.RLock() - s.openFilesMutex.RUnlock() return openFile, nil } - s.openFilesMutex.RUnlock() // Since the file isn't open already, need to check the open files map // again under write lock in case multiple readers got here and a // separate one is already opening the file. - s.openFilesMutex.Lock() if openFlatFile, ok := s.openFiles[fileNumber]; ok { - openFlatFile.RLock() - s.openFilesMutex.Unlock() return openFlatFile, nil } @@ -106,11 +95,8 @@ func (s *flatFileStore) flatFile(fileNumber uint32) (*lockableFile, error) { // recently used one as needed. openFile, err := s.openFile(fileNumber) if err != nil { - s.openFilesMutex.Unlock() return nil, err } - openFile.RLock() - s.openFilesMutex.Unlock() return openFile, nil } @@ -142,6 +128,7 @@ func (s *flatFileStore) openFile(fileNumber uint32) (*lockableFile, error) { // recently used list to indicate it is the most recently used file and // therefore should be closed last. s.lruMutex.Lock() + defer s.lruMutex.Unlock() lruList := s.openFilesLRU if lruList.Len() >= maxOpenFiles { lruFileNumber := lruList.Remove(lruList.Back()).(uint32) @@ -151,14 +138,13 @@ func (s *flatFileStore) openFile(fileNumber uint32) (*lockableFile, error) { // any readers are currently reading from it so it's not closed // out from under them. oldFile.Lock() + defer oldFile.Unlock() _ = oldFile.file.Close() - oldFile.Unlock() delete(s.openFiles, lruFileNumber) delete(s.fileNumberToLRUElement, lruFileNumber) } s.fileNumberToLRUElement[fileNumber] = lruList.PushFront(fileNumber) - s.lruMutex.Unlock() // Store a reference to it in the open files map. s.openFiles[fileNumber] = flatFile diff --git a/database/ffldb/ff/rollback.go b/database/ffldb/ff/rollback.go index e11675f49..2da000ecb 100644 --- a/database/ffldb/ff/rollback.go +++ b/database/ffldb/ff/rollback.go @@ -64,12 +64,7 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error { // Close the current write file if it needs to be deleted. if s.writeCursor.currentFileNumber > targetFileNumber { - s.writeCursor.currentFile.Lock() - if s.writeCursor.currentFile.file != nil { - s.writeCursor.currentFile.file.Close() - s.writeCursor.currentFile.file = nil - } - s.writeCursor.currentFile.Unlock() + s.closeCurrentWriteCursorFile() } // Delete all files that are newer than the provided rollback file @@ -90,10 +85,10 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error { // Open the file for the current write cursor if needed. 
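+	// The deferred Unlock below keeps the file locked through the open,
+	// truncate and sync that follow, so the early-return error paths no
+	// longer need their own Unlock calls.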
s.writeCursor.currentFile.Lock() + defer s.writeCursor.currentFile.Unlock() if s.writeCursor.currentFile.file == nil { openFile, err := s.openWriteFile(s.writeCursor.currentFileNumber) if err != nil { - s.writeCursor.currentFile.Unlock() return err } s.writeCursor.currentFile.file = openFile @@ -102,14 +97,12 @@ func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error { // Truncate the file to the provided target offset. err := s.writeCursor.currentFile.file.Truncate(int64(targetFileOffset)) if err != nil { - s.writeCursor.currentFile.Unlock() return errors.Wrapf(err, "ROLLBACK: Failed to truncate file %d "+ "in store '%s'", s.writeCursor.currentFileNumber, s.storeName) } // Sync the file to disk. err = s.writeCursor.currentFile.file.Sync() - s.writeCursor.currentFile.Unlock() if err != nil { return errors.Wrapf(err, "ROLLBACK: Failed to sync file %d in "+ "store '%s'", s.writeCursor.currentFileNumber, s.storeName) diff --git a/database/ffldb/ff/write.go b/database/ffldb/ff/write.go index 897fa2da2..362fbc46e 100644 --- a/database/ffldb/ff/write.go +++ b/database/ffldb/ff/write.go @@ -47,18 +47,16 @@ func (s *flatFileStore) write(data []byte) (*flatFileLocation, error) { // with LRU tracking. The close is done under the write lock // for the file to prevent it from being closed out from under // any readers currently reading from it. - cursor.Lock() - cursor.currentFile.Lock() - if cursor.currentFile.file != nil { - _ = cursor.currentFile.file.Close() - cursor.currentFile.file = nil - } - cursor.currentFile.Unlock() + func() { + cursor.Lock() + defer cursor.Unlock() - // Start writes into next file. - cursor.currentFileNumber++ - cursor.currentOffset = 0 - cursor.Unlock() + s.closeCurrentWriteCursorFile() + + // Start writes into next file. + cursor.currentFileNumber++ + cursor.currentOffset = 0 + }() } // All writes are done under the write lock for the file to ensure any @@ -164,3 +162,15 @@ func (s *flatFileStore) writeData(data []byte, fieldName string) error { return nil } + +// closeCurrentWriteCursorFile closes the currently open writeCursor file if +// it's open. +// This method MUST be called with the writeCursor lock held for writes. 
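+// It is shared by the write path, which invokes it when writes roll over
+// into a new file, and by rollback, which invokes it before deleting files
+// newer than the rollback target.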
+func (s *flatFileStore) closeCurrentWriteCursorFile() { + s.writeCursor.currentFile.Lock() + defer s.writeCursor.currentFile.Unlock() + if s.writeCursor.currentFile.file != nil { + _ = s.writeCursor.currentFile.file.Close() + s.writeCursor.currentFile.file = nil + } +} From f8e851a6ed75b3cf76516e9d864b6178ff12807a Mon Sep 17 00:00:00 2001 From: Svarog Date: Mon, 4 May 2020 16:33:23 +0300 Subject: [PATCH 20/77] [NOD-968] Wrap all ldb errors with pkg/errors (#712) --- database/ffldb/ldb/cursor.go | 16 ++++++---------- database/ffldb/ldb/transaction.go | 5 +++-- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/database/ffldb/ldb/cursor.go b/database/ffldb/ldb/cursor.go index a9d1eb3d2..ebc020bba 100644 --- a/database/ffldb/ldb/cursor.go +++ b/database/ffldb/ldb/cursor.go @@ -2,6 +2,7 @@ package ldb import ( "bytes" + "github.com/kaspanet/kaspad/database" "github.com/pkg/errors" "github.com/syndtr/goleveldb/leveldb/iterator" @@ -52,20 +53,15 @@ func (c *LevelDBCursor) Seek(key *database.Key) error { return errors.New("cannot seek a closed cursor") } - notFoundErr := errors.Wrapf(database.ErrNotFound, "key %s not "+ - "found", key) found := c.ldbIterator.Seek(key.Bytes()) if !found { - return notFoundErr + return errors.Wrapf(database.ErrNotFound, "key %s not found", key) } // Use c.ldbIterator.Key because c.Key removes the prefix from the key currentKey := c.ldbIterator.Key() - if currentKey == nil { - return notFoundErr - } - if !bytes.Equal(currentKey, key.Bytes()) { - return notFoundErr + if currentKey == nil || !bytes.Equal(currentKey, key.Bytes()) { + return errors.Wrapf(database.ErrNotFound, "key %s not found", key) } return nil @@ -82,7 +78,7 @@ func (c *LevelDBCursor) Key() (*database.Key, error) { fullKeyPath := c.ldbIterator.Key() if fullKeyPath == nil { return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ - "key of a done cursor") + "key of an exhausted cursor") } suffix := bytes.TrimPrefix(fullKeyPath, c.bucket.Path()) return c.bucket.Key(suffix), nil @@ -98,7 +94,7 @@ func (c *LevelDBCursor) Value() ([]byte, error) { value := c.ldbIterator.Value() if value == nil { return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ - "value of a done cursor") + "value of an exhausted cursor") } return value, nil } diff --git a/database/ffldb/ldb/transaction.go b/database/ffldb/ldb/transaction.go index 41c3866da..4c3e974b0 100644 --- a/database/ffldb/ldb/transaction.go +++ b/database/ffldb/ldb/transaction.go @@ -53,7 +53,7 @@ func (tx *LevelDBTransaction) Commit() error { tx.isClosed = true tx.snapshot.Release() - return tx.db.ldb.Write(tx.batch, nil) + return errors.WithStack(tx.db.ldb.Write(tx.batch, nil)) } // Rollback rolls back whatever changes were made to the @@ -115,7 +115,8 @@ func (tx *LevelDBTransaction) Has(key *database.Key) (bool, error) { return false, errors.New("cannot has from a closed transaction") } - return tx.snapshot.Has(key.Bytes(), nil) + res, err := tx.snapshot.Has(key.Bytes(), nil) + return res, errors.WithStack(err) } // Delete deletes the value for the given key. 
Will not return an error if the key doesn't exist.

From 3d04e6bdedf08ac6302931ec34715a199cf29dc3 Mon Sep 17 00:00:00 2001
From: Ori Newman
Date: Tue, 5 May 2020 17:26:54 +0300
Subject: [PATCH 21/77] [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)

* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult
* [NOD-943] Remove intermediate variables
* [NOD-943] Add block hash to error message
* [NOD-943] Change comment
---
 blockdag/dag.go             | 15 +++++++++++++++
 rpcmodel/rpc_results.go     |  5 +++--
 server/rpc/common.go        | 10 ++++++++--
 server/rpc/rpcserverhelp.go |  1 +
 4 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/blockdag/dag.go b/blockdag/dag.go
index ad1bb4182..c30858152 100644
--- a/blockdag/dag.go
+++ b/blockdag/dag.go
@@ -1489,6 +1489,21 @@ func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) {
 	return node.blueScore, nil
 }
 
+// BluesByBlockHash returns the blues of the block for the given hash.
+func (dag *BlockDAG) BluesByBlockHash(hash *daghash.Hash) ([]*daghash.Hash, error) {
+	node := dag.index.LookupNode(hash)
+	if node == nil {
+		return nil, errors.Errorf("block %s is unknown", hash)
+	}
+
+	hashes := make([]*daghash.Hash, len(node.blues))
+	for i, blue := range node.blues {
+		hashes[i] = blue.hash
+	}
+
+	return hashes, nil
+}
+
 // BlockConfirmationsByHash returns the confirmations number for a block with the
 // given hash. See blockConfirmations for further details.
 //
diff --git a/rpcmodel/rpc_results.go b/rpcmodel/rpc_results.go
index a791d56b6..0517d525c 100644
--- a/rpcmodel/rpc_results.go
+++ b/rpcmodel/rpc_results.go
@@ -46,8 +46,9 @@ type GetBlockVerboseResult struct {
 	Bits                 string   `json:"bits"`
 	Difficulty           float64  `json:"difficulty"`
 	ParentHashes         []string `json:"parentHashes"`
-	SelectedParentHash   string   `json:"selectedParentHash,omitempty"`
-	ChildHashes          []string `json:"childHashes,omitempty"`
+	SelectedParentHash  string   `json:"selectedParentHash"`
+	ChildHashes         []string `json:"childHashes"`
+	AcceptedBlockHashes []string `json:"acceptedBlockHashes"`
 }
 
 // CreateMultiSigResult models the data returned from the createmultisig
diff --git a/server/rpc/common.go b/server/rpc/common.go
index 8005a7e8f..92f761f6a 100644
--- a/server/rpc/common.go
+++ b/server/rpc/common.go
@@ -221,7 +221,6 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 		context := "No next block"
 		return nil, internalRPCError(err.Error(), context)
 	}
-	childHashStrings := daghash.Strings(childHashes)
 
 	blockConfirmations, err := s.cfg.DAG.BlockConfirmationsByHashNoLock(hash)
 	if err != nil {
@@ -245,6 +244,12 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 		return nil, internalRPCError(err.Error(), context)
 	}
 
+	acceptedBlockHashes, err := s.cfg.DAG.BluesByBlockHash(hash)
+	if err != nil {
+		context := fmt.Sprintf("Could not get accepted blocks for block %s", hash)
+		return nil, internalRPCError(err.Error(), context)
+	}
+
 	result := &rpcmodel.GetBlockVerboseResult{
 		Hash:          hash.String(),
 		Version:       blockHeader.Version,
@@ -262,7 +267,8 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
 		Size:                int32(block.MsgBlock().SerializeSize()),
 		Bits:                strconv.FormatInt(int64(blockHeader.Bits), 16),
 		Difficulty:          getDifficultyRatio(blockHeader.Bits, params),
-		ChildHashes:         childHashStrings,
+		ChildHashes:         daghash.Strings(childHashes),
+		AcceptedBlockHashes: daghash.Strings(acceptedBlockHashes),
 	}
 
 	if isVerboseTx {
diff --git a/server/rpc/rpcserverhelp.go b/server/rpc/rpcserverhelp.go
index 93d965598..00c61931c 100644
--- a/server/rpc/rpcserverhelp.go
+++ b/server/rpc/rpcserverhelp.go
@@ -252,6 +252,7 @@ var helpDescsEnUS = map[string]string{
 	"getBlockVerboseResult-parentHashes":        "The hashes of the parent blocks",
 	"getBlockVerboseResult-selectedParentHash":  "The selected parent hash",
 	"getBlockVerboseResult-childHashes":         "The hashes of the child blocks (only if there are any)",
+	"getBlockVerboseResult-acceptedBlockHashes": "The hashes of the blocks accepted by this block",
 
 	// GetBlockCountCmd help.
 	"getBlockCount--synopsis": "Returns the number of blocks in the block DAG.",

From c8a381d5bb7c7e16fd3b70ab2a62d76390180008 Mon Sep 17 00:00:00 2001
From: Svarog
Date: Wed, 6 May 2020 13:05:48 +0300
Subject: [PATCH 22/77] [NOD-981] Fixed error message when both --notls and --rpccert are omitted (#713)

---
 cmd/kaspaminer/config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/kaspaminer/config.go b/cmd/kaspaminer/config.go
index 20293319d..20375e805 100644
--- a/cmd/kaspaminer/config.go
+++ b/cmd/kaspaminer/config.go
@@ -75,7 +75,7 @@ func parseConfig() (*configFlags, error) {
 	}
 
 	if cfg.RPCCert == "" && !cfg.DisableTLS {
-		return nil, errors.New("--notls has to be disabled if --cert is used")
+		return nil, errors.New("either --notls or --rpccert must be specified")
 	}
 	if cfg.RPCCert != "" && cfg.DisableTLS {
 		return nil, errors.New("--rpccert should be omitted if --notls is used")

From 585510d76cb8474c71cf05106aadc531ec66610e Mon Sep 17 00:00:00 2001
From: Ori Newman
Date: Tue, 12 May 2020 13:47:15 +0300
Subject: [PATCH 23/77] [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)

* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice
* [NOD-847] Fix Tests
* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections
* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config
* [NOD-847] Add "is" before boolean methods
* [NOD-847] Fix Connect's lock
* [NOD-847] Make numAddressesInAddressManager an argument
* [NOD-847] Add teardown function for address manager
* [NOD-847] Add stack trace to ConnManager errors
* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos
* [NOD-847] Fix wrong test name for addressManagerForTest
* [NOD-847] Change error message if New fails
* [NOD-847] Add new line on releaseAddress
* [NOD-847] Always try to reconnect on disconnect
---
 connmgr/connmanager.go      | 198 ++++++++++++++-----
 connmgr/connmanager_test.go | 366 ++++++++++++++++++++++++++++++++----
 server/p2p/p2p.go           | 116 ++----------
 wire/netaddress.go          |   8 +
 4 files changed, 500 insertions(+), 188 deletions(-)

diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go
index aaa245c87..3f9bf6ee9 100644
--- a/connmgr/connmanager.go
+++ b/connmgr/connmanager.go
@@ -7,6 +7,9 @@ package connmgr
 import (
 	nativeerrors "errors"
 	"fmt"
+	"github.com/kaspanet/kaspad/addrmgr"
+	"github.com/kaspanet/kaspad/config"
+	"github.com/kaspanet/kaspad/wire"
 	"net"
 	"sync"
 	"sync/atomic"
@@ -30,10 +33,6 @@ var (
 	// defaultRetryDuration is the default duration of time for retrying
 	// persistent connections.
 	defaultRetryDuration = time.Second * 5
-
-	// defaultTargetOutbound is the default number of outbound connections to
-	// maintain.
-	defaultTargetOutbound = uint32(8)
 )
 
 var (
@@ -54,6 +53,9 @@ var (
 	// ErrPeerNotFound is an error that is thrown if the peer was not found.
 	ErrPeerNotFound = errors.New("peer not found")
+
+	// ErrAddressManagerNil is used to indicate that the Address Manager cannot be nil in the configuration.
+	ErrAddressManagerNil = errors.New("Config: Address manager cannot be nil")
 )
 
 // ConnState represents the state of the requested connection.
@@ -77,7 +79,7 @@ type ConnReq struct {
 	// The following variables must only be used atomically.
 	id uint64
 
-	Addr      net.Addr
+	Addr      *net.TCPAddr
 	Permanent bool
 
 	conn net.Conn
@@ -159,9 +161,7 @@ type Config struct {
 	// connection is disconnected.
 	OnDisconnection func(*ConnReq)
 
-	// GetNewAddress is a way to get an address to make a network connection
-	// to. If nil, no new connections will be made automatically.
-	GetNewAddress func() (net.Addr, error)
+	// AddrManager is the address manager from which addresses for new
+	// automatic connections are drawn. It cannot be nil.
+	AddrManager *addrmgr.AddrManager
 
 	// Dial connects to the address on the named network. It cannot be nil.
 	Dial func(net.Addr) (net.Conn, error)
@@ -201,7 +201,9 @@ type ConnManager struct {
 	start int32
 	stop  int32
 
-	newConnReqMtx sync.Mutex
+	addressMtx         sync.Mutex
+	usedOutboundGroups map[string]int64
+	usedAddresses      map[string]struct{}
 
 	cfg Config
 	wg  sync.WaitGroup
@@ -237,9 +239,12 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
 			log.Debugf("Retrying further connections to %s every %s", c, d)
 		}
 		spawnAfter(d, func() {
-			cm.Connect(c)
+			cm.connect(c)
 		})
-	} else if cm.cfg.GetNewAddress != nil {
+	} else {
+		if c.Addr != nil {
+			cm.releaseAddress(c.Addr)
+		}
 		cm.failedAttempts++
 		if cm.failedAttempts >= maxFailedAttempts {
 			if shouldWriteLog {
@@ -254,6 +259,43 @@
 	}
 }
 
+func (cm *ConnManager) releaseAddress(addr *net.TCPAddr) {
+	cm.addressMtx.Lock()
+	defer cm.addressMtx.Unlock()
+
+	groupKey := usedOutboundGroupsKey(addr)
+	cm.usedOutboundGroups[groupKey]--
+	if cm.usedOutboundGroups[groupKey] < 0 {
+		panic(fmt.Errorf("cm.usedOutboundGroups[%s] has a negative value of %d. This should never happen", groupKey, cm.usedOutboundGroups[groupKey]))
+	}
+	delete(cm.usedAddresses, usedAddressesKey(addr))
+}
+
+func (cm *ConnManager) markAddressAsUsed(addr *net.TCPAddr) {
+	cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]++
+	cm.usedAddresses[usedAddressesKey(addr)] = struct{}{}
+}
+
+func (cm *ConnManager) isOutboundGroupUsed(addr *net.TCPAddr) bool {
+	_, ok := cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]
+	return ok
+}
+
+func (cm *ConnManager) isAddressUsed(addr *net.TCPAddr) bool {
+	_, ok := cm.usedAddresses[usedAddressesKey(addr)]
+	return ok
+}
+
+func usedOutboundGroupsKey(addr *net.TCPAddr) string {
+	// A fake service flag is used since it doesn't affect the group key.
+	na := wire.NewNetAddress(addr, wire.SFNodeNetwork)
+	return addrmgr.GroupKey(na)
+}
+
+func usedAddressesKey(addr *net.TCPAddr) string {
+	return addr.String()
+}
+
 // throttledError defines an error type whose logs get throttled. This is to
 // prevent flooding the logs with identical errors.
 type throttledError error
@@ -392,21 +434,16 @@ out:
 				continue
 			}
 
-			// Otherwise, we will attempt a reconnection if
-			// we do not have enough peers, or if this is a
-			// persistent peer. The connection request is
-			// re added to the pending map, so that
-			// subsequent processing of connections and
-			// failures do not ignore the request.
-			if uint32(len(conns)) < cm.cfg.TargetOutbound ||
-				connReq.Permanent {
-
-				connReq.updateState(ConnPending)
-				log.Debugf("Reconnecting to %s",
-					connReq)
-				pending[msg.id] = connReq
-				cm.handleFailedConn(connReq, nil)
-			}
+			// Otherwise, we will attempt a reconnection.
+			// The connection request is re-added to the
+			// pending map, so that subsequent processing
+			// of connections and failures do not ignore
+			// the request.
+			connReq.updateState(ConnPending)
+			log.Debugf("Reconnecting to %s",
+				connReq)
+			pending[msg.id] = connReq
+			cm.handleFailedConn(connReq, nil)
 
 		case handleFailed:
 			connReq := msg.c
@@ -448,14 +485,9 @@ func (cm *ConnManager) NotifyConnectionRequestComplete() {
 
 // NewConnReq creates a new connection request and connects to the
 // corresponding address.
 func (cm *ConnManager) NewConnReq() {
-	cm.newConnReqMtx.Lock()
-	defer cm.newConnReqMtx.Unlock()
 	if atomic.LoadInt32(&cm.stop) != 0 {
 		return
 	}
-	if cm.cfg.GetNewAddress == nil {
-		return
-	}
 
 	c := &ConnReq{}
 	atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
@@ -478,8 +510,7 @@
 	case <-cm.quit:
 		return
 	}
-
-	addr, err := cm.cfg.GetNewAddress()
+	err := cm.associateAddressToConnReq(c)
 	if err != nil {
 		select {
 		case cm.requests <- handleFailed{c, err}:
@@ -488,17 +519,52 @@
 		return
 	}
 
-	c.Addr = addr
+	cm.connect(c)
+}
 
-	cm.Connect(c)
+func (cm *ConnManager) associateAddressToConnReq(c *ConnReq) error {
+	cm.addressMtx.Lock()
+	defer cm.addressMtx.Unlock()
+
+	addr, err := cm.getNewAddress()
+	if err != nil {
+		return err
+	}
+
+	cm.markAddressAsUsed(addr)
+	c.Addr = addr
+	return nil
 }
 
 // Connect assigns an id and dials a connection to the address of the
 // connection request.
-func (cm *ConnManager) Connect(c *ConnReq) {
+func (cm *ConnManager) Connect(c *ConnReq) error {
+	err := func() error {
+		cm.addressMtx.Lock()
+		defer cm.addressMtx.Unlock()
+
+		if cm.isAddressUsed(c.Addr) {
+			return fmt.Errorf("address %s is already in use", c.Addr)
+		}
+		cm.markAddressAsUsed(c.Addr)
+		return nil
+	}()
+	if err != nil {
+		return err
+	}
+
+	cm.connect(c)
+	return nil
+}
+
+// connect assigns an id and dials a connection to the address of the
+// connection request. This function assumes that the connection address
+// has been checked and already marked as used.
+func (cm *ConnManager) connect(c *ConnReq) {
 	if atomic.LoadInt32(&cm.stop) != 0 {
 		return
 	}
+
 	if atomic.LoadUint64(&c.id) == 0 {
 		atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
 
@@ -645,23 +711,69 @@ func (cm *ConnManager) Stop() {
 	log.Trace("Connection manager stopped")
 }
 
+func (cm *ConnManager) getNewAddress() (*net.TCPAddr, error) {
+	for tries := 0; tries < 100; tries++ {
+		addr := cm.cfg.AddrManager.GetAddress()
+		if addr == nil {
+			break
+		}
+
+		// Check if there's already a connection to the same address.
+		netAddr := addr.NetAddress().TCPAddress()
+		if cm.isAddressUsed(netAddr) {
+			continue
+		}
+
+		// Address will not be invalid, local or unroutable
+		// because addrmanager rejects those on addition.
+		// Just check that we don't already have an address
+		// in the same group so that we are not connecting
+		// to the same network segment at the expense of
+		// others.
+		//
+		// Networks that accept unroutable connections are exempt
+		// from this rule, since they're meant to run within a
+		// private subnet, like 10.0.0.0/16.
+		if !config.ActiveConfig().NetParams().AcceptUnroutable && cm.isOutboundGroupUsed(netAddr) {
+			continue
+		}
+
+		// only allow recent nodes (10mins) after we failed 30
+		// times
+		if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
+			continue
+		}
+
+		// allow nondefault ports after 50 failed tries.
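+		// Note that this selection policy, including the two retry
+		// thresholds used here, was moved into the connection manager
+		// from the newAddressFunc that previously lived in server/p2p.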
+ if tries < 50 && fmt.Sprintf("%d", netAddr.Port) != + config.ActiveConfig().NetParams().DefaultPort { + continue + } + + return netAddr, nil + } + return nil, ErrNoAddress +} + // New returns a new connection manager. // Use Start to start connecting to the network. func New(cfg *Config) (*ConnManager, error) { if cfg.Dial == nil { - return nil, ErrDialNil + return nil, errors.WithStack(ErrDialNil) + } + if cfg.AddrManager == nil { + return nil, errors.WithStack(ErrAddressManagerNil) } // Default to sane values if cfg.RetryDuration <= 0 { cfg.RetryDuration = defaultRetryDuration } - if cfg.TargetOutbound == 0 { - cfg.TargetOutbound = defaultTargetOutbound - } cm := ConnManager{ - cfg: *cfg, // Copy so caller can't mutate - requests: make(chan interface{}), - quit: make(chan struct{}), + cfg: *cfg, // Copy so caller can't mutate + requests: make(chan interface{}), + quit: make(chan struct{}), + usedAddresses: make(map[string]struct{}), + usedOutboundGroups: make(map[string]int64), } return &cm, nil } diff --git a/connmgr/connmanager_test.go b/connmgr/connmanager_test.go index 3948c38b4..7a981437a 100644 --- a/connmgr/connmanager_test.go +++ b/connmgr/connmanager_test.go @@ -5,9 +5,15 @@ package connmgr import ( + "fmt" + "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/config" + "github.com/kaspanet/kaspad/dagconfig" "github.com/pkg/errors" "io" + "io/ioutil" "net" + "os" "sync/atomic" "testing" "time" @@ -70,13 +76,28 @@ func mockDialer(addr net.Addr) (net.Conn, error) { // TestNewConfig tests that new ConnManager config is validated as expected. func TestNewConfig(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + _, err := New(&Config{}) - if err == nil { - t.Fatalf("New expected error: 'Dial can't be nil', got nil") + if !errors.Is(err, ErrDialNil) { + t.Fatalf("New expected error: %s, got %s", ErrDialNil, err) } + _, err = New(&Config{ Dial: mockDialer, }) + if !errors.Is(err, ErrAddressManagerNil) { + t.Fatalf("New expected error: %s, got %s", ErrAddressManagerNil, err) + } + + amgr, teardown := addressManagerForTest(t, "TestNewConfig", 10) + defer teardown() + + _, err = New(&Config{ + Dial: mockDialer, + AddrManager: amgr, + }) if err != nil { t.Fatalf("New unexpected error: %v", err) } @@ -85,17 +106,19 @@ func TestNewConfig(t *testing.T) { // TestStartStop tests that the connection manager starts and stops as // expected. 
func TestStartStop(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + connected := make(chan *ConnReq) disconnected := make(chan *ConnReq) + + amgr, teardown := addressManagerForTest(t, "TestStartStop", 10) + defer teardown() + cmgr, err := New(&Config{ TargetOutbound: 1, - GetNewAddress: func() (net.Addr, error) { - return &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, nil - }, - Dial: mockDialer, + AddrManager: amgr, + Dial: mockDialer, OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c }, @@ -104,7 +127,7 @@ func TestStartStop(t *testing.T) { }, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() gotConnReq := <-connected @@ -119,7 +142,10 @@ func TestStartStop(t *testing.T) { }, Permanent: true, } - cmgr.Connect(cr) + err = cmgr.Connect(cr) + if err != nil { + t.Fatalf("Connect error: %s", err) + } if cr.ID() != 0 { t.Fatalf("start/stop: got id: %v, want: 0", cr.ID()) } @@ -133,21 +159,78 @@ func TestStartStop(t *testing.T) { } } +func overrideActiveConfig() func() { + originalActiveCfg := config.ActiveConfig() + config.SetActiveConfig(&config.Config{ + Flags: &config.Flags{ + NetworkFlags: config.NetworkFlags{ + ActiveNetParams: &dagconfig.SimnetParams}, + }, + }) + return func() { + // Give some extra time to all open NewConnReq goroutines + // to finish before restoring the active config to prevent + // potential panics. + time.Sleep(10 * time.Millisecond) + + config.SetActiveConfig(originalActiveCfg) + } +} + +func addressManagerForTest(t *testing.T, testName string, numAddresses uint8) (*addrmgr.AddrManager, func()) { + amgr, teardown := createEmptyAddressManagerForTest(t, testName) + + for i := uint8(0); i < numAddresses; i++ { + ip := fmt.Sprintf("173.%d.115.66:16511", i) + err := amgr.AddAddressByIP(ip, nil) + if err != nil { + t.Fatalf("AddAddressByIP unexpectedly failed to add IP %s: %s", ip, err) + } + } + + return amgr, teardown +} + +func createEmptyAddressManagerForTest(t *testing.T, testName string) (*addrmgr.AddrManager, func()) { + path, err := ioutil.TempDir("", fmt.Sprintf("%s-addressmanager", testName)) + if err != nil { + t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+ + "failed: %s", err) + } + + return addrmgr.New(path, nil, nil), func() { + // Wait for the connection manager to finish + time.Sleep(10 * time.Millisecond) + + err := os.RemoveAll(path) + if err != nil { + t.Fatalf("couldn't remove path %s", path) + } + } +} + // TestConnectMode tests that the connection manager works in the connect mode. // // In connect mode, automatic connections are disabled, so we test that // requests using Connect are handled and that no other connections are made. func TestConnectMode(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + connected := make(chan *ConnReq) + amgr, teardown := addressManagerForTest(t, "TestConnectMode", 10) + defer teardown() + cmgr, err := New(&Config{ - TargetOutbound: 2, + TargetOutbound: 0, Dial: mockDialer, OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c }, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cr := &ConnReq{ Addr: &net.TCPAddr{ @@ -176,6 +259,7 @@ func TestConnectMode(t *testing.T) { break } cmgr.Stop() + cmgr.Wait() } // TestTargetOutbound tests the target number of outbound connections. 
@@ -183,23 +267,26 @@ func TestConnectMode(t *testing.T) {
 // We wait until all connections are established, then verify that they are
 // the only connections made.
 func TestTargetOutbound(t *testing.T) {
-	targetOutbound := uint32(10)
+	restoreConfig := overrideActiveConfig()
+	defer restoreConfig()
+
+	const numAddressesInAddressManager = 10
+	targetOutbound := uint32(numAddressesInAddressManager - 2)
 	connected := make(chan *ConnReq)
+
+	amgr, teardown := addressManagerForTest(t, "TestTargetOutbound", 10)
+	defer teardown()
+
 	cmgr, err := New(&Config{
 		TargetOutbound: targetOutbound,
 		Dial:           mockDialer,
-		GetNewAddress: func() (net.Addr, error) {
-			return &net.TCPAddr{
-				IP:   net.ParseIP("127.0.0.1"),
-				Port: 18555,
-			}, nil
-		},
+		AddrManager:    amgr,
 		OnConnection: func(c *ConnReq, conn net.Conn) {
 			connected <- c
 		},
 	})
 	if err != nil {
-		t.Fatalf("New error: %v", err)
+		t.Fatalf("unexpected error from New: %s", err)
 	}
 	cmgr.Start()
 	for i := uint32(0); i < targetOutbound; i++ {
@@ -213,6 +300,146 @@
 		break
 	}
 	cmgr.Stop()
+	cmgr.Wait()
+}
+
+// TestDuplicateOutboundConnections tests that connection requests cannot use an already used address.
+// It verifies this by creating one connection request for each address in the address manager, so that
+// the next connection request must fail because no unused address is available.
+func TestDuplicateOutboundConnections(t *testing.T) {
+	restoreConfig := overrideActiveConfig()
+	defer restoreConfig()
+
+	const numAddressesInAddressManager = 10
+	targetOutbound := uint32(numAddressesInAddressManager - 1)
+	connected := make(chan struct{})
+	failedConnections := make(chan struct{})
+
+	amgr, teardown := addressManagerForTest(t, "TestDuplicateOutboundConnections", 10)
+	defer teardown()
+
+	cmgr, err := New(&Config{
+		TargetOutbound: targetOutbound,
+		Dial:           mockDialer,
+		AddrManager:    amgr,
+		OnConnection: func(c *ConnReq, conn net.Conn) {
+			connected <- struct{}{}
+		},
+		OnConnectionFailed: func(_ *ConnReq) {
+			failedConnections <- struct{}{}
+		},
+	})
+	if err != nil {
+		t.Fatalf("unexpected error from New: %s", err)
+	}
+	cmgr.Start()
+	for i := uint32(0); i < targetOutbound; i++ {
+		<-connected
+	}
+
+	time.Sleep(time.Millisecond)
+
+	// Here we check that making a manual connection request beyond the target outbound connection
+	// count doesn't fail, so we know that when a later connection request does fail, it fails for
+	// an address-related reason.
+	cmgr.NewConnReq()
+	select {
+	case <-connected:
+		break
+	case <-time.After(time.Millisecond):
+		t.Fatalf("connection request unexpectedly didn't connect")
+	}
+
+	select {
+	case <-failedConnections:
+		t.Fatalf("a connection request unexpectedly failed")
+	case <-time.After(time.Millisecond):
+		break
+	}
+
+	// After we created numAddressesInAddressManager connection requests, this request should fail
+	// because there aren't any more available addresses.
+	cmgr.NewConnReq()
+	select {
+	case <-connected:
+		t.Fatalf("connection request unexpectedly succeeded")
+	case <-time.After(time.Millisecond):
+		t.Fatalf("connection request didn't fail as expected")
+	case <-failedConnections:
+		break
+	}
+
+	cmgr.Stop()
+	cmgr.Wait()
+}
+
+// TestSameOutboundGroupConnections tests that connection requests cannot use an address from an
+// already used CIDR group.
+// It verifies this by creating an address manager with only two addresses that belong to the same
+// CIDR group and checking that the second connection request fails.
+func TestSameOutboundGroupConnections(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + + amgr, teardown := createEmptyAddressManagerForTest(t, "TestSameOutboundGroupConnections") + defer teardown() + + err := amgr.AddAddressByIP("173.190.115.66:16511", nil) + if err != nil { + t.Fatalf("AddAddressByIP unexpectedly failed: %s", err) + } + + err = amgr.AddAddressByIP("173.190.115.67:16511", nil) + if err != nil { + t.Fatalf("AddAddressByIP unexpectedly failed: %s", err) + } + + connected := make(chan struct{}) + failedConnections := make(chan struct{}) + cmgr, err := New(&Config{ + TargetOutbound: 0, + Dial: mockDialer, + AddrManager: amgr, + OnConnection: func(c *ConnReq, conn net.Conn) { + connected <- struct{}{} + }, + OnConnectionFailed: func(_ *ConnReq) { + failedConnections <- struct{}{} + }, + }) + if err != nil { + t.Fatalf("unexpected error from New: %s", err) + } + + cmgr.Start() + + cmgr.NewConnReq() + select { + case <-connected: + break + case <-time.After(time.Millisecond): + t.Fatalf("connection request unexpectedly didn't connect") + } + + select { + case <-failedConnections: + t.Fatalf("a connection request unexpectedly failed") + case <-time.After(time.Millisecond): + break + } + + cmgr.NewConnReq() + select { + case <-connected: + t.Fatalf("connection request unexpectedly succeeded") + case <-time.After(time.Millisecond): + t.Fatalf("connection request didn't fail as expected") + case <-failedConnections: + break + } + + cmgr.Stop() + cmgr.Wait() } // TestRetryPermanent tests that permanent connection requests are retried. @@ -220,11 +447,18 @@ func TestTargetOutbound(t *testing.T) { // We make a permanent connection request using Connect, disconnect it using // Disconnect and we wait for it to be connected back. func TestRetryPermanent(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + connected := make(chan *ConnReq) disconnected := make(chan *ConnReq) + + amgr, teardown := addressManagerForTest(t, "TestRetryPermanent", 10) + defer teardown() + cmgr, err := New(&Config{ RetryDuration: time.Millisecond, - TargetOutbound: 1, + TargetOutbound: 0, Dial: mockDialer, OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c @@ -232,9 +466,10 @@ func TestRetryPermanent(t *testing.T) { OnDisconnection: func(c *ConnReq) { disconnected <- c }, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cr := &ConnReq{ @@ -289,6 +524,9 @@ func TestRetryPermanent(t *testing.T) { cmgr.Remove(cr.ID()) gotConnReq = <-disconnected + + // Wait for status to be updated + time.Sleep(10 * time.Millisecond) wantID = cr.ID() gotID = gotConnReq.ID() if gotID != wantID { @@ -300,6 +538,7 @@ func TestRetryPermanent(t *testing.T) { t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState) } cmgr.Stop() + cmgr.Wait() } // TestMaxRetryDuration tests the maximum retry duration. @@ -307,6 +546,9 @@ func TestRetryPermanent(t *testing.T) { // We have a timed dialer which initially returns err but after RetryDuration // hits maxRetryDuration returns a mock conn. 
func TestMaxRetryDuration(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + networkUp := make(chan struct{}) time.AfterFunc(5*time.Millisecond, func() { close(networkUp) @@ -320,6 +562,9 @@ func TestMaxRetryDuration(t *testing.T) { } } + amgr, teardown := addressManagerForTest(t, "TestMaxRetryDuration", 10) + defer teardown() + connected := make(chan *ConnReq) cmgr, err := New(&Config{ RetryDuration: time.Millisecond, @@ -328,9 +573,10 @@ func TestMaxRetryDuration(t *testing.T) { OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c }, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cr := &ConnReq{ @@ -350,35 +596,40 @@ func TestMaxRetryDuration(t *testing.T) { case <-time.Tick(100 * time.Millisecond): t.Fatalf("max retry duration: connection timeout") } + cmgr.Stop() + cmgr.Wait() } // TestNetworkFailure tests that the connection manager handles a network // failure gracefully. func TestNetworkFailure(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + var dials uint32 errDialer := func(net net.Addr) (net.Conn, error) { atomic.AddUint32(&dials, 1) return nil, errors.New("network down") } + + amgr, teardown := addressManagerForTest(t, "TestNetworkFailure", 10) + defer teardown() + cmgr, err := New(&Config{ TargetOutbound: 5, RetryDuration: 5 * time.Millisecond, Dial: errDialer, - GetNewAddress: func() (net.Addr, error) { - return &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }, nil - }, + AddrManager: amgr, OnConnection: func(c *ConnReq, conn net.Conn) { t.Fatalf("network failure: got unexpected connection - %v", c.Addr) }, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() - time.AfterFunc(10*time.Millisecond, cmgr.Stop) + time.Sleep(10 * time.Millisecond) + cmgr.Stop() cmgr.Wait() wantMaxDials := uint32(75) if atomic.LoadUint32(&dials) > wantMaxDials { @@ -394,17 +645,25 @@ func TestNetworkFailure(t *testing.T) { // err so that the handler assumes that the conn manager is stopped and ignores // the failure. func TestStopFailed(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + done := make(chan struct{}, 1) waitDialer := func(addr net.Addr) (net.Conn, error) { done <- struct{}{} time.Sleep(time.Millisecond) return nil, errors.New("network down") } + + amgr, teardown := addressManagerForTest(t, "TestStopFailed", 10) + defer teardown() + cmgr, err := New(&Config{ - Dial: waitDialer, + Dial: waitDialer, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() go func() { @@ -428,6 +687,9 @@ func TestStopFailed(t *testing.T) { // TestRemovePendingConnection tests that it's possible to cancel a pending // connection, removing its internal state from the ConnMgr. func TestRemovePendingConnection(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + // Create a ConnMgr instance with an instance of a dialer that'll never // succeed. 
wait := make(chan struct{}) @@ -435,11 +697,16 @@ func TestRemovePendingConnection(t *testing.T) { <-wait return nil, errors.Errorf("error") } + + amgr, teardown := addressManagerForTest(t, "TestRemovePendingConnection", 10) + defer teardown() + cmgr, err := New(&Config{ - Dial: indefiniteDialer, + Dial: indefiniteDialer, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() @@ -474,12 +741,16 @@ func TestRemovePendingConnection(t *testing.T) { close(wait) cmgr.Stop() + cmgr.Wait() } // TestCancelIgnoreDelayedConnection tests that a canceled connection request will // not execute the on connection callback, even if an outstanding retry // succeeds. func TestCancelIgnoreDelayedConnection(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + retryTimeout := 10 * time.Millisecond // Setup a dialer that will continue to return an error until the @@ -497,18 +768,22 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) { } connected := make(chan *ConnReq) + + amgr, teardown := addressManagerForTest(t, "TestCancelIgnoreDelayedConnection", 10) + defer teardown() + cmgr, err := New(&Config{ Dial: failingDialer, RetryDuration: retryTimeout, OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c }, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() - defer cmgr.Stop() // Establish a connection request to a random IP we've chosen. cr := &ConnReq{ @@ -552,7 +827,8 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) { t.Fatalf("on-connect should not be called for canceled req") case <-time.After(5 * retryTimeout): } - + cmgr.Stop() + cmgr.Wait() } // mockListener implements the net.Listener interface and is used to test @@ -617,21 +893,29 @@ func newMockListener(localAddr string) *mockListener { // TestListeners ensures providing listeners to the connection manager along // with an accept callback works properly. func TestListeners(t *testing.T) { + restoreConfig := overrideActiveConfig() + defer restoreConfig() + // Setup a connection manager with a couple of mock listeners that // notify a channel when they receive mock connections. receivedConns := make(chan net.Conn) listener1 := newMockListener("127.0.0.1:16111") listener2 := newMockListener("127.0.0.1:9333") listeners := []net.Listener{listener1, listener2} + + amgr, teardown := addressManagerForTest(t, "TestListeners", 10) + defer teardown() + cmgr, err := New(&Config{ Listeners: listeners, OnAccept: func(conn net.Conn) { receivedConns <- conn }, - Dial: mockDialer, + Dial: mockDialer, + AddrManager: amgr, }) if err != nil { - t.Fatalf("New error: %v", err) + t.Fatalf("unexpected error from New: %s", err) } cmgr.Start() diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 8a41757b8..9bdc42fc0 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -8,7 +8,6 @@ package p2p import ( "crypto/rand" "encoding/binary" - "fmt" "math" "net" "runtime" @@ -150,7 +149,6 @@ type peerState struct { outboundPeers map[int32]*Peer persistentPeers map[int32]*Peer banned map[string]time.Time - outboundGroups map[string]int } // Count returns the count of all known peers. 
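With outboundGroups removed from peerState, the per-CIDR-group accounting now lives entirely in connmgr's usedOutboundGroups and usedAddresses maps. The following stand-alone sketch shows the same bookkeeping in isolation; a plain /16 prefix stands in for addrmgr.GroupKey, which in reality also handles IPv6, onion and local addresses, so treat it only as an approximation:

package main

import (
	"fmt"
	"net"
)

// groupKey buckets IPv4 addresses by their /16 network. It is an
// illustrative stand-in for addrmgr.GroupKey, not its actual behavior.
func groupKey(addr *net.TCPAddr) string {
	ip4 := addr.IP.To4()
	if ip4 == nil {
		return addr.IP.String()
	}
	return ip4.Mask(net.CIDRMask(16, 32)).String()
}

// addressBook mirrors the usedOutboundGroups/usedAddresses bookkeeping
// that this patch adds to ConnManager.
type addressBook struct {
	usedOutboundGroups map[string]int64
	usedAddresses      map[string]struct{}
}

func (b *addressBook) markUsed(addr *net.TCPAddr) {
	b.usedOutboundGroups[groupKey(addr)]++
	b.usedAddresses[addr.String()] = struct{}{}
}

func (b *addressBook) isUsable(addr *net.TCPAddr) bool {
	if _, used := b.usedAddresses[addr.String()]; used {
		return false // never dial the exact same address twice
	}
	if _, used := b.usedOutboundGroups[groupKey(addr)]; used {
		return false // avoid a second peer in the same network segment
	}
	return true
}

func main() {
	b := &addressBook{
		usedOutboundGroups: make(map[string]int64),
		usedAddresses:      make(map[string]struct{}),
	}
	first := &net.TCPAddr{IP: net.ParseIP("173.190.115.66"), Port: 16511}
	second := &net.TCPAddr{IP: net.ParseIP("173.190.115.67"), Port: 16511}
	b.markUsed(first)
	// first is rejected as already used; second is rejected because it
	// shares first's /16 group, as in TestSameOutboundGroupConnections.
	fmt.Println(b.isUsable(first), b.isUsable(second)) // false false
}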
@@ -665,9 +663,6 @@ func (s *Server) handleDonePeerMsg(state *peerState, sp *Peer) { list = state.outboundPeers } if _, ok := list[sp.ID()]; ok { - if !sp.Inbound() && sp.VersionKnown() { - state.outboundGroups[addrmgr.GroupKey(sp.NA())]-- - } if !sp.Inbound() && sp.connReq != nil { s.connManager.Disconnect(sp.connReq.ID()) } @@ -785,11 +780,6 @@ type GetPeersMsg struct { Reply chan []*Peer } -type getOutboundGroup struct { - key string - reply chan int -} - //GetManualNodesMsg is the message type which is used by the rpc server to get the list of persistent peers from the p2p server type GetManualNodesMsg struct { Reply chan []*Peer @@ -843,15 +833,15 @@ func (s *Server) handleQuery(state *peerState, querymsg interface{}) { // TODO: duplicate oneshots? // Limit max number of total peers. if state.countOutboundPeers() >= config.ActiveConfig().TargetOutboundPeers { - msg.Reply <- connmgr.ErrMaxOutboundPeers + msg.Reply <- errors.WithStack(connmgr.ErrMaxOutboundPeers) return } for _, peer := range state.persistentPeers { if peer.Addr() == msg.Addr { if msg.Permanent { - msg.Reply <- connmgr.ErrAlreadyConnected + msg.Reply <- errors.WithStack(connmgr.ErrAlreadyConnected) } else { - msg.Reply <- connmgr.ErrAlreadyPermanent + msg.Reply <- errors.WithStack(connmgr.ErrAlreadyPermanent) } return } @@ -872,23 +862,12 @@ func (s *Server) handleQuery(state *peerState, querymsg interface{}) { }) msg.Reply <- nil case RemoveNodeMsg: - found := disconnectPeer(state.persistentPeers, msg.Cmp, func(sp *Peer) { - // Keep group counts ok since we remove from - // the list now. - state.outboundGroups[addrmgr.GroupKey(sp.NA())]-- - }) + found := disconnectPeer(state.persistentPeers, msg.Cmp) if found { msg.Reply <- nil } else { - msg.Reply <- connmgr.ErrPeerNotFound - } - case getOutboundGroup: - count, ok := state.outboundGroups[msg.key] - if ok { - msg.reply <- count - } else { - msg.reply <- 0 + msg.Reply <- errors.WithStack(connmgr.ErrPeerNotFound) } // Request a list of the persistent (added) peers. case GetManualNodesMsg: @@ -901,32 +880,26 @@ func (s *Server) handleQuery(state *peerState, querymsg interface{}) { case DisconnectNodeMsg: // Check inbound peers. We pass a nil callback since we don't // require any additional actions on disconnect for inbound peers. - found := disconnectPeer(state.inboundPeers, msg.Cmp, nil) + found := disconnectPeer(state.inboundPeers, msg.Cmp) if found { msg.Reply <- nil return } // Check outbound peers. - found = disconnectPeer(state.outboundPeers, msg.Cmp, func(sp *Peer) { - // Keep group counts ok since we remove from - // the list now. - state.outboundGroups[addrmgr.GroupKey(sp.NA())]-- - }) + found = disconnectPeer(state.outboundPeers, msg.Cmp) if found { // If there are multiple outbound connections to the same // ip:port, continue disconnecting them all until no such // peers are found. for found { - found = disconnectPeer(state.outboundPeers, msg.Cmp, func(sp *Peer) { - state.outboundGroups[addrmgr.GroupKey(sp.NA())]-- - }) + found = disconnectPeer(state.outboundPeers, msg.Cmp) } msg.Reply <- nil return } - msg.Reply <- connmgr.ErrPeerNotFound + msg.Reply <- errors.WithStack(connmgr.ErrPeerNotFound) } } @@ -937,13 +910,9 @@ func (s *Server) handleQuery(state *peerState, querymsg interface{}) { // to be located. If the peer is found, and the passed callback: `whenFound' // isn't nil, we call it with the peer as the argument before it is removed // from the peerList, and is disconnected from the server. 
-func disconnectPeer(peerList map[int32]*Peer, compareFunc func(*Peer) bool, whenFound func(*Peer)) bool { +func disconnectPeer(peerList map[int32]*Peer, compareFunc func(*Peer) bool) bool { for addr, peer := range peerList { if compareFunc(peer) { - if whenFound != nil { - whenFound(peer) - } - // This is ok because we are not continuing // to iterate so won't corrupt the loop. delete(peerList, addr) @@ -1026,7 +995,6 @@ func (s *Server) outboundPeerConnected(state *peerState, msg *outboundPeerConnec s.peerDoneHandler(sp) }) s.addrManager.Attempt(sp.NA()) - state.outboundGroups[addrmgr.GroupKey(sp.NA())]++ } // outboundPeerConnected is invoked by the connection manager when a new @@ -1097,7 +1065,6 @@ func (s *Server) peerHandler() { persistentPeers: make(map[int32]*Peer), outboundPeers: make(map[int32]*Peer), banned: make(map[string]time.Time), - outboundGroups: make(map[string]int), } if !config.ActiveConfig().DisableDNSSeed { @@ -1226,14 +1193,6 @@ func (s *Server) ConnectedCount() int32 { return <-replyChan } -// OutboundGroupCount returns the number of peers connected to the given -// outbound group key. -func (s *Server) OutboundGroupCount(key string) int { - replyChan := make(chan int) - s.Query <- getOutboundGroup{key: key, reply: replyChan} - return <-replyChan -} - // AddBytesSent adds the passed number of bytes to the total bytes sent counter // for the server. It is safe for concurrent access. func (s *Server) AddBytesSent(bytesSent uint64) { @@ -1602,57 +1561,6 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch return nil, err } - // Only setup a function to return new addresses to connect to when - // not running in connect-only mode. The simulation network is always - // in connect-only mode since it is only intended to connect to - // specified peers and actively avoid advertising and connecting to - // discovered peers in order to prevent it from becoming a public test - // network. - var newAddressFunc func() (net.Addr, error) - if !config.ActiveConfig().Simnet && len(config.ActiveConfig().ConnectPeers) == 0 { - newAddressFunc = func() (net.Addr, error) { - for tries := 0; tries < 100; tries++ { - addr := s.addrManager.GetAddress() - if addr == nil { - break - } - - // Address will not be invalid, local or unroutable - // because addrmanager rejects those on addition. - // Just check that we don't already have an address - // in the same group so that we are not connecting - // to the same network segment at the expense of - // others. - // - // Networks that accept unroutable connections are exempt - // from this rule, since they're meant to run within a - // private subnet, like 10.0.0.0/16. - if !config.ActiveConfig().NetParams().AcceptUnroutable { - key := addrmgr.GroupKey(addr.NetAddress()) - if s.OutboundGroupCount(key) != 0 { - continue - } - } - - // only allow recent nodes (10mins) after we failed 30 - // times - if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute { - continue - } - - // allow nondefault ports after 50 failed tries. - if tries < 50 && fmt.Sprintf("%d", addr.NetAddress().Port) != - config.ActiveConfig().NetParams().DefaultPort { - continue - } - - addrString := addrmgr.NetAddressKey(addr.NetAddress()) - return addrStringToNetAddr(addrString) - } - return nil, connmgr.ErrNoAddress - } - } - // Create a connection manager. 
cmgr, err := connmgr.New(&connmgr.Config{ Listeners: listeners, @@ -1671,7 +1579,7 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch connReq: c, } }, - GetNewAddress: newAddressFunc, + AddrManager: s.addrManager, }) if err != nil { return nil, err @@ -1782,7 +1690,7 @@ func initListeners(amgr *addrmgr.AddrManager, listenAddrs []string, services wir // a net.Addr which maps to the original address with any host names resolved // to IP addresses. It also handles tor addresses properly by returning a // net.Addr that encapsulates the address. -func addrStringToNetAddr(addr string) (net.Addr, error) { +func addrStringToNetAddr(addr string) (*net.TCPAddr, error) { host, strPort, err := net.SplitHostPort(addr) if err != nil { return nil, err diff --git a/wire/netaddress.go b/wire/netaddress.go index 1ae13684d..4edcb1f39 100644 --- a/wire/netaddress.go +++ b/wire/netaddress.go @@ -48,6 +48,14 @@ func (na *NetAddress) AddService(service ServiceFlag) { na.Services |= service } +// TCPAddress converts the NetAddress to *net.TCPAddr +func (na *NetAddress) TCPAddress() *net.TCPAddr { + return &net.TCPAddr{ + IP: na.IP, + Port: int(na.Port), + } +} + // NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and // supported services with defaults for the remaining fields. func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress { From 806eab817c5b3b2fad5bf70ed4651572e4d2dcc0 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Tue, 12 May 2020 15:08:24 +0300 Subject: [PATCH 24/77] [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716) * [NOD-820] Add IsSynced to GetBlockTemplateResult. * [NOD-820] Add isSynced to the help file. * [NOD-820] Add MineWhenNotSynced to the kaspaminer config. * [NOD-820] Implement miner MineWhenNotSynced logic. * [NOD-820] Fixed capitalization in an error message. --- cmd/kaspaminer/config.go | 21 +++++++++++---------- cmd/kaspaminer/main.go | 2 +- cmd/kaspaminer/mineloop.go | 23 ++++++++++++++++++----- rpcmodel/rpc_results.go | 1 + server/rpc/handle_get_block_template.go | 23 ++++++++++++----------- server/rpc/rpcserverhelp.go | 1 + 6 files changed, 44 insertions(+), 27 deletions(-) diff --git a/cmd/kaspaminer/config.go b/cmd/kaspaminer/config.go index 20375e805..5f87d947e 100644 --- a/cmd/kaspaminer/config.go +++ b/cmd/kaspaminer/config.go @@ -30,16 +30,17 @@ var ( ) type configFlags struct { - ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` - RPCUser string `short:"u" long:"rpcuser" description:"RPC username"` - RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"` - RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` - RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"` - DisableTLS bool `long:"notls" description:"Disable TLS"` - Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"` - NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."` - BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). 
This is used only for testing purposes."` - Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` + RPCUser string `short:"u" long:"rpcuser" description:"RPC username"` + RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"` + RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` + RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"` + DisableTLS bool `long:"notls" description:"Disable TLS"` + Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"` + NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."` + BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."` + MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` config.NetworkFlags } diff --git a/cmd/kaspaminer/main.go b/cmd/kaspaminer/main.go index e8110503d..32bf524df 100644 --- a/cmd/kaspaminer/main.go +++ b/cmd/kaspaminer/main.go @@ -45,7 +45,7 @@ func main() { doneChan := make(chan struct{}) spawn(func() { - err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay) + err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced) if err != nil { panic(errors.Errorf("Error in mine loop: %s", err)) } diff --git a/cmd/kaspaminer/mineloop.go b/cmd/kaspaminer/mineloop.go index df2fa9d12..58a537eeb 100644 --- a/cmd/kaspaminer/mineloop.go +++ b/cmd/kaspaminer/mineloop.go @@ -25,7 +25,7 @@ var hashesTried uint64 const logHashRateInterval = 10 * time.Second -func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error { +func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool) error { errChan := make(chan error) templateStopChan := make(chan struct{}) @@ -35,7 +35,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err wg := sync.WaitGroup{} for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ { foundBlock := make(chan *util.Block) - mineNextBlock(client, foundBlock, templateStopChan, errChan) + mineNextBlock(client, foundBlock, mineWhenNotSynced, templateStopChan, errChan) block := <-foundBlock templateStopChan <- struct{}{} wg.Add(1) @@ -80,13 +80,15 @@ func logHashRate() { }) } -func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) { +func mineNextBlock(client *minerClient, foundBlock chan *util.Block, mineWhenNotSynced bool, + templateStopChan chan struct{}, errChan chan error) { + newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult) spawn(func() { templatesLoop(client, newTemplateChan, errChan, templateStopChan) }) spawn(func() { - solveLoop(newTemplateChan, foundBlock, errChan) + solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan) }) } @@ -207,12 +209,23 @@ func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBloc return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID) } -func solveLoop(newTemplateChan 
chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
+func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block,
+	mineWhenNotSynced bool, errChan chan error) {
+
 	var stopOldTemplateSolving chan struct{}
 	for template := range newTemplateChan {
 		if stopOldTemplateSolving != nil {
 			close(stopOldTemplateSolving)
 		}
+
+		if !template.IsSynced {
+			if !mineWhenNotSynced {
+				errChan <- errors.Errorf("got template with isSynced=false")
+				return
+			}
+			log.Warnf("Got template with isSynced=false")
+		}
+
 		stopOldTemplateSolving = make(chan struct{})
 		block, err := parseBlock(template)
 		if err != nil {
diff --git a/rpcmodel/rpc_results.go b/rpcmodel/rpc_results.go
index 0517d525c..9b3da13b6 100644
--- a/rpcmodel/rpc_results.go
+++ b/rpcmodel/rpc_results.go
@@ -151,6 +151,7 @@ type GetBlockTemplateResult struct {
 	CoinbaseTxn   *GetBlockTemplateResultTx `json:"coinbaseTxn,omitempty"`
 	CoinbaseValue *uint64                   `json:"coinbaseValue,omitempty"`
 	WorkID        string                    `json:"workId,omitempty"`
+	IsSynced      bool                      `json:"isSynced"`
 
 	// Optional long polling from BIP 0022.
 	LongPollID string `json:"longPollId,omitempty"`
diff --git a/server/rpc/handle_get_block_template.go b/server/rpc/handle_get_block_template.go
index 7e9a7da3a..ac615a596 100644
--- a/server/rpc/handle_get_block_template.go
+++ b/server/rpc/handle_get_block_template.go
@@ -71,6 +71,7 @@ type gbtWorkState struct {
 	template    *mining.BlockTemplate
 	notifyMap   map[string]map[int64]chan struct{}
 	timeSource  blockdag.TimeSource
+	isSynced    bool
 }
 
 // newGbtWorkState returns a new instance of a gbtWorkState with all internal
@@ -105,17 +106,6 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{
 		mode = request.Mode
 	}
 
-	// No point in generating templates or processing proposals before
-	// the DAG is synced. Note that we make a special check for when
-	// we have nothing besides the genesis block (blueScore == 0),
-	// because in that state IsCurrent may still return true.
-	if !isSyncedForMining(s) {
-		return nil, &rpcmodel.RPCError{
-			Code:    rpcmodel.ErrRPCClientInInitialDownload,
-			Message: "Kaspa is downloading blocks...",
-		}
-	}
-
 	switch mode {
 	case "template":
 		return handleGetBlockTemplateRequest(s, request, closeChan)
@@ -655,6 +645,15 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)
 	// consensus rules.
 	minTimestamp := s.cfg.DAG.NextBlockMinimumTime()
 
+	// Check whether this node is synced with the rest of the
+	// network. There's almost never a good reason to mine on top
+	// of an unsynced DAG, and miners are generally expected not to
+	// mine when isSynced is false.
+	// This is not a straight-up error because the choice of whether
+	// to mine or not is the responsibility of the miner rather
+	// than the node's.
+	isSynced := isSyncedForMining(s)
+
 	// Update work state to ensure another block template isn't
 	// generated until needed.
 	state.template = template
@@ -662,6 +661,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)
 	state.lastTxUpdate = lastTxUpdate
 	state.tipHashes = tipHashes
 	state.minTimestamp = minTimestamp
+	state.isSynced = isSynced
 
 	log.Debugf("Generated block template (timestamp %s, "+
 		"target %s, merkle root %s)",
@@ -820,6 +820,7 @@ func (state *gbtWorkState) blockTemplateResult(dag *blockdag.BlockDAG, useCoinba
 		Mutable:      gbtMutableFields,
 		NonceRange:   gbtNonceRange,
 		Capabilities: gbtCapabilities,
+		IsSynced:     state.isSynced,
 	}
 
 	if useCoinbaseValue {
diff --git a/server/rpc/rpcserverhelp.go b/server/rpc/rpcserverhelp.go
index 00c61931c..2514d0204 100644
--- a/server/rpc/rpcserverhelp.go
+++ b/server/rpc/rpcserverhelp.go
@@ -331,6 +331,7 @@ var helpDescsEnUS = map[string]string{
 	"getBlockTemplateResult-nonceRange":   "Two concatenated hex-encoded big-endian 64-bit integers which represent the valid ranges of nonces the miner may scan",
 	"getBlockTemplateResult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals",
 	"getBlockTemplateResult-rejectReason": "Reason the proposal was invalid as-is (only applies to proposal responses)",
+	"getBlockTemplateResult-isSynced":     "Whether this node is synced with the rest of the network. Miners are generally expected not to mine when isSynced is false",
 
 	// GetBlockTemplateCmd help.
 	"getBlockTemplate--synopsis": "Returns a JSON object with information necessary to construct a block to mine or accepts a proposal to validate.\n" +
From 65f75c17fcd19e39260eb1967a1bd237b0323d41 Mon Sep 17 00:00:00 2001
From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Date: Wed, 13 May 2020 10:03:37 +0300
Subject: [PATCH 25/77] [NOD-982] Log message with level WARN when getting
 MsgReject (#717)

* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
---
 logs/logs.go            |  74 +++++++----------
 peer/log.go             | 159 -------------------------------------
 peer/message_logging.go | 172 ++++++++++++++++++++++++++++++++++++++++
 peer/peer.go            |   6 +-
 4 files changed, 204 insertions(+), 207 deletions(-)
 create mode 100644 peer/message_logging.go

diff --git a/logs/logs.go b/logs/logs.go
index 0041bb24e..7984031b3 100644
--- a/logs/logs.go
+++ b/logs/logs.go
@@ -368,108 +368,90 @@ type Logger struct {
 // Trace formats message using the default formats for its operands, prepends
 // the prefix as necessary, and writes to log with LevelTrace.
 func (l *Logger) Trace(args ...interface{}) {
-	lvl := l.Level()
-	if lvl <= LevelTrace {
-		l.b.print(LevelTrace, l.tag, args...)
-	}
+	l.Write(LevelTrace, args...)
 }
 
 // Tracef formats message according to format specifier, prepends the prefix as
 // necessary, and writes to log with LevelTrace.
 func (l *Logger) Tracef(format string, args ...interface{}) {
-	lvl := l.Level()
-	if lvl <= LevelTrace {
-		l.b.printf(LevelTrace, l.tag, format, args...)
-	}
+	l.Writef(LevelTrace, format, args...)
 }
 
 // Debug formats message using the default formats for its operands, prepends
 // the prefix as necessary, and writes to log with LevelDebug.
 func (l *Logger) Debug(args ...interface{}) {
-	lvl := l.Level()
-	if lvl <= LevelDebug {
-		l.b.print(LevelDebug, l.tag, args...)
-	}
+	l.Write(LevelDebug, args...)
} // Debugf formats message according to format specifier, prepends the prefix as // necessary, and writes to log with LevelDebug. func (l *Logger) Debugf(format string, args ...interface{}) { - lvl := l.Level() - if lvl <= LevelDebug { - l.b.printf(LevelDebug, l.tag, format, args...) - } + l.Writef(LevelDebug, format, args...) } // Info formats message using the default formats for its operands, prepends // the prefix as necessary, and writes to log with LevelInfo. func (l *Logger) Info(args ...interface{}) { - lvl := l.Level() - if lvl <= LevelInfo { - l.b.print(LevelInfo, l.tag, args...) - } + l.Write(LevelInfo, args...) } // Infof formats message according to format specifier, prepends the prefix as // necessary, and writes to log with LevelInfo. func (l *Logger) Infof(format string, args ...interface{}) { - lvl := l.Level() - if lvl <= LevelInfo { - l.b.printf(LevelInfo, l.tag, format, args...) - } + l.Writef(LevelInfo, format, args...) } // Warn formats message using the default formats for its operands, prepends // the prefix as necessary, and writes to log with LevelWarn. func (l *Logger) Warn(args ...interface{}) { - lvl := l.Level() - if lvl <= LevelWarn { - l.b.print(LevelWarn, l.tag, args...) - } + l.Write(LevelWarn, args...) } // Warnf formats message according to format specifier, prepends the prefix as // necessary, and writes to log with LevelWarn. func (l *Logger) Warnf(format string, args ...interface{}) { - lvl := l.Level() - if lvl <= LevelWarn { - l.b.printf(LevelWarn, l.tag, format, args...) - } + l.Writef(LevelWarn, format, args...) } // Error formats message using the default formats for its operands, prepends // the prefix as necessary, and writes to log with LevelError. func (l *Logger) Error(args ...interface{}) { - lvl := l.Level() - if lvl <= LevelError { - l.b.print(LevelError, l.tag, args...) - } + l.Write(LevelError, args...) } // Errorf formats message according to format specifier, prepends the prefix as // necessary, and writes to log with LevelError. func (l *Logger) Errorf(format string, args ...interface{}) { - lvl := l.Level() - if lvl <= LevelError { - l.b.printf(LevelError, l.tag, format, args...) - } + l.Writef(LevelError, format, args...) } // Critical formats message using the default formats for its operands, prepends // the prefix as necessary, and writes to log with LevelCritical. func (l *Logger) Critical(args ...interface{}) { - lvl := l.Level() - if lvl <= LevelCritical { - l.b.print(LevelCritical, l.tag, args...) - } + l.Write(LevelCritical, args...) } // Criticalf formats message according to format specifier, prepends the prefix // as necessary, and writes to log with LevelCritical. func (l *Logger) Criticalf(format string, args ...interface{}) { + l.Writef(LevelCritical, format, args...) +} + +// Write formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with the given logLevel. +func (l *Logger) Write(logLevel Level, args ...interface{}) { lvl := l.Level() - if lvl <= LevelCritical { - l.b.printf(LevelCritical, l.tag, format, args...) + if lvl <= logLevel { + l.b.print(logLevel, l.tag, args...) + } +} + +// Writef formats message according to format specifier, prepends the prefix +// as necessary, and writes to log with the given logLevel. +func (l *Logger) Writef(logLevel Level, format string, args ...interface{}) { + lvl := l.Level() + if lvl <= logLevel { + l.b.printf(logLevel, l.tag, format, args...) 
} } diff --git a/peer/log.go b/peer/log.go index 9d05ad7e8..7ae364f48 100644 --- a/peer/log.go +++ b/peer/log.go @@ -5,169 +5,10 @@ package peer import ( - "fmt" - "strings" - "time" - "github.com/kaspanet/kaspad/logger" - "github.com/kaspanet/kaspad/txscript" "github.com/kaspanet/kaspad/util/panics" - "github.com/kaspanet/kaspad/wire" -) - -const ( - // maxRejectReasonLen is the maximum length of a sanitized reject reason - // that will be logged. - maxRejectReasonLen = 250 ) var log, _ = logger.Get(logger.SubsystemTags.PEER) var spawn = panics.GoroutineWrapperFunc(log) var spawnAfter = panics.AfterFuncWrapperFunc(log) - -// formatLockTime returns a transaction lock time as a human-readable string. -func formatLockTime(lockTime uint64) string { - // The lock time field of a transaction is either a block height at - // which the transaction is finalized or a timestamp depending on if the - // value is before the lockTimeThreshold. When it is under the - // threshold it is a block height. - if lockTime < txscript.LockTimeThreshold { - return fmt.Sprintf("height %d", lockTime) - } - - return time.Unix(int64(lockTime), 0).String() -} - -// invSummary returns an inventory message as a human-readable string. -func invSummary(invList []*wire.InvVect) string { - // No inventory. - invLen := len(invList) - if invLen == 0 { - return "empty" - } - - // One inventory item. - if invLen == 1 { - iv := invList[0] - switch iv.Type { - case wire.InvTypeError: - return fmt.Sprintf("error %s", iv.Hash) - case wire.InvTypeBlock: - return fmt.Sprintf("block %s", iv.Hash) - case wire.InvTypeSyncBlock: - return fmt.Sprintf("sync block %s", iv.Hash) - case wire.InvTypeTx: - return fmt.Sprintf("tx %s", iv.Hash) - } - - return fmt.Sprintf("unknown (%d) %s", uint32(iv.Type), iv.Hash) - } - - // More than one inv item. - return fmt.Sprintf("size %d", invLen) -} - -// sanitizeString strips any characters which are even remotely dangerous, such -// as html control characters, from the passed string. It also limits it to -// the passed maximum size, which can be 0 for unlimited. When the string is -// limited, it will also add "..." to the string to indicate it was truncated. -func sanitizeString(str string, maxLength uint) string { - const safeChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY" + - "Z01234567890 .,;_/:?@" - - // Strip any characters not in the safeChars string removed. - str = strings.Map(func(r rune) rune { - if strings.ContainsRune(safeChars, r) { - return r - } - return -1 - }, str) - - // Limit the string to the max allowed length. - if maxLength > 0 && uint(len(str)) > maxLength { - str = str[:maxLength] - str = str + "..." - } - return str -} - -// messageSummary returns a human-readable string which summarizes a message. -// Not all messages have or need a summary. This is used for debug logging. -func messageSummary(msg wire.Message) string { - switch msg := msg.(type) { - case *wire.MsgVersion: - return fmt.Sprintf("agent %s, pver %d, selected tip %s", - msg.UserAgent, msg.ProtocolVersion, msg.SelectedTipHash) - - case *wire.MsgVerAck: - // No summary. - - case *wire.MsgGetAddr: - if msg.IncludeAllSubnetworks { - return "all subnetworks and full nodes" - } - if msg.SubnetworkID == nil { - return "full nodes" - } - return fmt.Sprintf("subnetwork ID %v", msg.SubnetworkID) - - case *wire.MsgAddr: - return fmt.Sprintf("%d addr", len(msg.AddrList)) - - case *wire.MsgPing: - // No summary - perhaps add nonce. - - case *wire.MsgPong: - // No summary - perhaps add nonce. 
- - case *wire.MsgTx: - return fmt.Sprintf("hash %s, %d inputs, %d outputs, lock %s", - msg.TxID(), len(msg.TxIn), len(msg.TxOut), - formatLockTime(msg.LockTime)) - - case *wire.MsgBlock: - header := &msg.Header - return fmt.Sprintf("hash %s, ver %d, %d tx, %s", msg.BlockHash(), - header.Version, len(msg.Transactions), header.Timestamp) - - case *wire.MsgInv: - return invSummary(msg.InvList) - - case *wire.MsgNotFound: - return invSummary(msg.InvList) - - case *wire.MsgGetData: - return invSummary(msg.InvList) - - case *wire.MsgGetBlockInvs: - return fmt.Sprintf("low hash %s, high hash %s", msg.LowHash, - msg.HighHash) - - case *wire.MsgGetBlockLocator: - return fmt.Sprintf("high hash %s, low hash %s", msg.HighHash, - msg.LowHash) - - case *wire.MsgBlockLocator: - if len(msg.BlockLocatorHashes) > 0 { - return fmt.Sprintf("locator first hash: %s, last hash: %s", msg.BlockLocatorHashes[0], msg.BlockLocatorHashes[len(msg.BlockLocatorHashes)-1]) - } - return fmt.Sprintf("no locator") - - case *wire.MsgReject: - // Ensure the variable length strings don't contain any - // characters which are even remotely dangerous such as HTML - // control characters, etc. Also limit them to sane length for - // logging. - rejCommand := sanitizeString(msg.Cmd, wire.CommandSize) - rejReason := sanitizeString(msg.Reason, maxRejectReasonLen) - summary := fmt.Sprintf("cmd %s, code %s, reason %s", rejCommand, - msg.Code, rejReason) - if rejCommand == wire.CmdBlock || rejCommand == wire.CmdTx { - summary += fmt.Sprintf(", hash %s", msg.Hash) - } - return summary - } - - // No summary for other messages. - return "" -} diff --git a/peer/message_logging.go b/peer/message_logging.go new file mode 100644 index 000000000..3be61a78e --- /dev/null +++ b/peer/message_logging.go @@ -0,0 +1,172 @@ +package peer + +import ( + "fmt" + "github.com/kaspanet/kaspad/logs" + "github.com/kaspanet/kaspad/txscript" + "github.com/kaspanet/kaspad/wire" + "strings" + "time" +) + +const ( + // maxRejectReasonLen is the maximum length of a sanitized reject reason + // that will be logged. + maxRejectReasonLen = 250 +) + +// formatLockTime returns a transaction lock time as a human-readable string. +func formatLockTime(lockTime uint64) string { + // The lock time field of a transaction is either a block height at + // which the transaction is finalized or a timestamp depending on if the + // value is before the lockTimeThreshold. When it is under the + // threshold it is a block height. + if lockTime < txscript.LockTimeThreshold { + return fmt.Sprintf("height %d", lockTime) + } + + return time.Unix(int64(lockTime), 0).String() +} + +// invSummary returns an inventory message as a human-readable string. +func invSummary(invList []*wire.InvVect) string { + // No inventory. + invLen := len(invList) + if invLen == 0 { + return "empty" + } + + // One inventory item. + if invLen == 1 { + iv := invList[0] + switch iv.Type { + case wire.InvTypeError: + return fmt.Sprintf("error %s", iv.Hash) + case wire.InvTypeBlock: + return fmt.Sprintf("block %s", iv.Hash) + case wire.InvTypeSyncBlock: + return fmt.Sprintf("sync block %s", iv.Hash) + case wire.InvTypeTx: + return fmt.Sprintf("tx %s", iv.Hash) + } + + return fmt.Sprintf("unknown (%d) %s", uint32(iv.Type), iv.Hash) + } + + // More than one inv item. + return fmt.Sprintf("size %d", invLen) +} + +// sanitizeString strips any characters which are even remotely dangerous, such +// as html control characters, from the passed string. 
 It also limits it to
+// the passed maximum size, which can be 0 for unlimited. When the string is
+// limited, it will also add "..." to the string to indicate it was truncated.
+func sanitizeString(str string, maxLength uint) string {
+	const safeChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY" +
+		"Z01234567890 .,;_/:?@"
+
+	// Strip any characters not in the safeChars string.
+	str = strings.Map(func(r rune) rune {
+		if strings.ContainsRune(safeChars, r) {
+			return r
+		}
+		return -1
+	}, str)
+
+	// Limit the string to the max allowed length.
+	if maxLength > 0 && uint(len(str)) > maxLength {
+		str = str[:maxLength]
+		str = str + "..."
+	}
+	return str
+}
+
+// messageSummary returns a human-readable string which summarizes a message.
+// Not all messages have or need a summary. This is used for debug logging.
+func messageSummary(msg wire.Message) string {
+	switch msg := msg.(type) {
+	case *wire.MsgVersion:
+		return fmt.Sprintf("agent %s, pver %d, selected tip %s",
+			msg.UserAgent, msg.ProtocolVersion, msg.SelectedTipHash)
+
+	case *wire.MsgVerAck:
+		// No summary.
+
+	case *wire.MsgGetAddr:
+		if msg.IncludeAllSubnetworks {
+			return "all subnetworks and full nodes"
+		}
+		if msg.SubnetworkID == nil {
+			return "full nodes"
+		}
+		return fmt.Sprintf("subnetwork ID %v", msg.SubnetworkID)
+
+	case *wire.MsgAddr:
+		return fmt.Sprintf("%d addr", len(msg.AddrList))
+
+	case *wire.MsgPing:
+		// No summary - perhaps add nonce.
+
+	case *wire.MsgPong:
+		// No summary - perhaps add nonce.
+
+	case *wire.MsgTx:
+		return fmt.Sprintf("hash %s, %d inputs, %d outputs, lock %s",
+			msg.TxID(), len(msg.TxIn), len(msg.TxOut),
+			formatLockTime(msg.LockTime))
+
+	case *wire.MsgBlock:
+		header := &msg.Header
+		return fmt.Sprintf("hash %s, ver %d, %d tx, %s", msg.BlockHash(),
+			header.Version, len(msg.Transactions), header.Timestamp)
+
+	case *wire.MsgInv:
+		return invSummary(msg.InvList)
+
+	case *wire.MsgNotFound:
+		return invSummary(msg.InvList)
+
+	case *wire.MsgGetData:
+		return invSummary(msg.InvList)
+
+	case *wire.MsgGetBlockInvs:
+		return fmt.Sprintf("low hash %s, high hash %s", msg.LowHash,
+			msg.HighHash)
+
+	case *wire.MsgGetBlockLocator:
+		return fmt.Sprintf("high hash %s, low hash %s", msg.HighHash,
+			msg.LowHash)
+
+	case *wire.MsgBlockLocator:
+		if len(msg.BlockLocatorHashes) > 0 {
+			return fmt.Sprintf("locator first hash: %s, last hash: %s", msg.BlockLocatorHashes[0], msg.BlockLocatorHashes[len(msg.BlockLocatorHashes)-1])
+		}
+		return fmt.Sprintf("no locator")
+
+	case *wire.MsgReject:
+		// Ensure the variable length strings don't contain any
+		// characters which are even remotely dangerous such as HTML
+		// control characters, etc. Also limit them to sane length for
+		// logging.
+		rejCommand := sanitizeString(msg.Cmd, wire.CommandSize)
+		rejReason := sanitizeString(msg.Reason, maxRejectReasonLen)
+		summary := fmt.Sprintf("cmd %s, code %s, reason %s", rejCommand,
+			msg.Code, rejReason)
+		if rejCommand == wire.CmdBlock || rejCommand == wire.CmdTx {
+			summary += fmt.Sprintf(", hash %s", msg.Hash)
+		}
+		return summary
+	}
+
+	// No summary for other messages.
+ return "" +} + +func messageLogLevel(msg wire.Message) logs.Level { + switch msg.(type) { + case *wire.MsgReject: + return logs.LevelWarn + default: + return logs.LevelDebug + } +} diff --git a/peer/peer.go b/peer/peer.go index a9559d684..13f705871 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -986,7 +986,8 @@ func (p *Peer) readMessage() (wire.Message, []byte, error) { // Use closures to log expensive operations so they are only run when // the logging level requires it. - log.Debugf("%s", logger.NewLogClosure(func() string { + logLevel := messageLogLevel(msg) + log.Writef(logLevel, "%s", logger.NewLogClosure(func() string { // Debug summary of message. summary := messageSummary(msg) if len(summary) > 0 { @@ -1014,7 +1015,8 @@ func (p *Peer) writeMessage(msg wire.Message) error { // Use closures to log expensive operations so they are only run when // the logging level requires it. - log.Debugf("%s", logger.NewLogClosure(func() string { + logLevel := messageLogLevel(msg) + log.Writef(logLevel, "%s", logger.NewLogClosure(func() string { // Debug summary of message. summary := messageSummary(msg) if len(summary) > 0 { From 35b943e04f947fe5c1e5f1a28c5d089047a51627 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Wed, 13 May 2020 15:57:30 +0300 Subject: [PATCH 26/77] [NOD-996] Disable kaspad logs in TestScripts (#718) * [NOD-996] Disable kaspad logs in TestScripts. * [NOD-996] Return the log level to its original state after TestScripts is done. --- txscript/reference_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/txscript/reference_test.go b/txscript/reference_test.go index 6680807a8..f22e374fc 100644 --- a/txscript/reference_test.go +++ b/txscript/reference_test.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/kaspanet/kaspad/logs" "github.com/pkg/errors" "io/ioutil" "strconv" @@ -361,6 +362,11 @@ func TestScripts(t *testing.T) { t.Fatalf("TestScripts couldn't Unmarshal: %v", err) } + // Disable non-test logs + logLevel := log.Level() + log.SetLevel(logs.LevelOff) + defer log.SetLevel(logLevel) + // Run all script tests with and without the signature cache. 
testScripts(t, tests, true) testScripts(t, tests, false) From 378f0b659a317f869806864ba7652b8489a08a48 Mon Sep 17 00:00:00 2001 From: Svarog Date: Wed, 13 May 2020 17:27:53 +0300 Subject: [PATCH 27/77] [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719) * [NOD-993] Use %+v when printing errors * [NOD-993] Get rid of AssertError * [NOD-993] Made ruleError use github.com/pkg/errors * [NOD-993] remove redundant TODO * [NOD-993] remove redundant Comment * [NOD-993] Removed DeploymentError --- blockdag/common_test.go | 29 +++++++++++-------------- blockdag/dag.go | 7 +++--- blockdag/dagio.go | 34 ++++++++++++++--------------- blockdag/error.go | 27 ++++------------------- blockdag/error_test.go | 44 -------------------------------------- blockdag/thresholdstate.go | 7 +++--- blockdag/validate_test.go | 3 ++- kaspad.go | 6 +++--- 8 files changed, 47 insertions(+), 110 deletions(-) diff --git a/blockdag/common_test.go b/blockdag/common_test.go index 46155e3ff..ca616d3b1 100644 --- a/blockdag/common_test.go +++ b/blockdag/common_test.go @@ -7,15 +7,15 @@ package blockdag import ( "compress/bzip2" "encoding/binary" - "github.com/pkg/errors" "io" "os" "path/filepath" - "reflect" "strings" "testing" "time" + "github.com/pkg/errors" + "github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" @@ -144,29 +144,26 @@ func addNodeAsChildToParents(node *blockNode) { // same type (either both nil or both of type RuleError) and their error codes // match when not nil. func checkRuleError(gotErr, wantErr error) error { - // Ensure the error code is of the expected type and the error - // code matches the value specified in the test instance. - if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) { - return errors.Errorf("wrong error - got %T (%[1]v), want %T", - gotErr, wantErr) - } - if gotErr == nil { + if wantErr == nil && gotErr == nil { return nil } - // Ensure the want error type is a script error. - werr, ok := wantErr.(RuleError) - if !ok { - return errors.Errorf("unexpected test error type %T", wantErr) + var gotRuleErr RuleError + if ok := errors.As(gotErr, &gotRuleErr); !ok { + return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr) + } + + var wantRuleErr RuleError + if ok := errors.As(wantErr, &wantRuleErr); !ok { + return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr) } // Ensure the error codes match. It's safe to use a raw type assert // here since the code above already proved they are the same type and // the want error is a script error. - gotErrorCode := gotErr.(RuleError).ErrorCode - if gotErrorCode != werr.ErrorCode { + if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode { return errors.Errorf("mismatched error code - got %v (%v), want %v", - gotErrorCode, gotErr, werr.ErrorCode) + gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode) } return nil diff --git a/blockdag/dag.go b/blockdag/dag.go index c30858152..3bf409611 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -6,12 +6,13 @@ package blockdag import ( "fmt" - "github.com/kaspanet/kaspad/dbaccess" "math" "sort" "sync" "time" + "github.com/kaspanet/kaspad/dbaccess" + "github.com/pkg/errors" "github.com/kaspanet/kaspad/util/subnetworkid" @@ -2082,10 +2083,10 @@ type Config struct { func New(config *Config) (*BlockDAG, error) { // Enforce required config fields. 
 	if config.DAGParams == nil {
-		return nil, AssertError("BlockDAG.New DAG parameters nil")
+		return nil, errors.New("BlockDAG.New DAG parameters nil")
 	}
 	if config.TimeSource == nil {
-		return nil, AssertError("BlockDAG.New timesource is nil")
+		return nil, errors.New("BlockDAG.New timesource is nil")
 	}
 
 	params := config.DAGParams
diff --git a/blockdag/dagio.go b/blockdag/dagio.go
index f72e9f9c3..730ff4a41 100644
--- a/blockdag/dagio.go
+++ b/blockdag/dagio.go
@@ -223,7 +223,7 @@ func (dag *BlockDAG) initDAGState() error {
 	log.Debugf("Applying the loaded utxoCollection to the virtual block...")
 	dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
 	if err != nil {
-		return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
+		return errors.Wrap(err, "Error loading UTXOSet")
 	}
 
 	log.Debugf("Applying the stored tips to the virtual block...")
@@ -289,14 +289,14 @@ func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err e
 
 		if dag.blockCount == 0 {
 			if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
-				return nil, AssertError(fmt.Sprintf("Expected "+
+				return nil, errors.Errorf("Expected "+
 					"first entry in block index to be genesis block, "+
-					"found %s", node.hash))
+					"found %s", node.hash)
 			}
 		} else {
 			if len(node.parents) == 0 {
-				return nil, AssertError(fmt.Sprintf("block %s "+
-					"has no parents but it's not the genesis block", node.hash))
+				return nil, errors.Errorf("block %s "+
+					"has no parents but it's not the genesis block", node.hash)
 			}
 		}
 
@@ -350,8 +350,8 @@ func (dag *BlockDAG) initVirtualBlockTips(state *dagState) error {
 	for _, tipHash := range state.TipHashes {
 		tip := dag.index.LookupNode(tipHash)
 		if tip == nil {
-			return AssertError(fmt.Sprintf("cannot find "+
-				"DAG tip %s in block index", state.TipHashes))
+			return errors.Errorf("cannot find "+
+				"DAG tip %s in block index", tipHash)
 		}
 		tips.add(tip)
 	}
@@ -365,12 +365,12 @@ func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*block
 		// doesn't, the database has certainly been corrupted.
 		blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash)
 		if err != nil {
-			return AssertError(fmt.Sprintf("HasBlock "+
-				"for block %s failed: %s", node.hash, err))
+			return errors.Wrapf(err, "HasBlock "+
+				"for block %s failed", node.hash)
 		}
 		if !blockExists {
-			return AssertError(fmt.Sprintf("block %s "+
-				"exists in block index but not in block db", node.hash))
+			return errors.Errorf("block %s "+
+				"exists in block index but not in block db", node.hash)
 		}
 
 		// Attempt to accept the block.
@@ -388,14 +388,14 @@ func (dag *BlockDAG) processUnprocessedBlockNodes(unprocessedBlockNodes []*block
 		// If the block is an orphan or is delayed then it couldn't have
 		// possibly been written to the block index in the first place.
if isOrphan { - return AssertError(fmt.Sprintf("Block %s, which was not "+ + return errors.Errorf("Block %s, which was not "+ "previously processed, turned out to be an orphan, which is "+ - "impossible.", node.hash)) + "impossible.", node.hash) } if isDelayed { - return AssertError(fmt.Sprintf("Block %s, which was not "+ + return errors.Errorf("Block %s, which was not "+ "previously processed, turned out to be delayed, which is "+ - "impossible.", node.hash)) + "impossible.", node.hash) } } return nil @@ -428,8 +428,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) { for _, hash := range header.ParentHashes { parent := dag.index.LookupNode(hash) if parent == nil { - return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+ - "not find parent %s for block %s", hash, header.BlockHash())) + return nil, errors.Errorf("deserializeBlockNode: Could "+ + "not find parent %s for block %s", hash, header.BlockHash()) } node.parents.add(parent) } diff --git a/blockdag/error.go b/blockdag/error.go index b003f048d..977a4e77a 100644 --- a/blockdag/error.go +++ b/blockdag/error.go @@ -6,28 +6,10 @@ package blockdag import ( "fmt" + + "github.com/pkg/errors" ) -// DeploymentError identifies an error that indicates a deployment ID was -// specified that does not exist. -type DeploymentError uint32 - -// Error returns the assertion error as a human-readable string and satisfies -// the error interface. -func (e DeploymentError) Error() string { - return fmt.Sprintf("deployment ID %d does not exist", uint32(e)) -} - -// AssertError identifies an error that indicates an internal code consistency -// issue and should be treated as a critical and unrecoverable error. -type AssertError string - -// Error returns the assertion error as a human-readable string and satisfies -// the error interface. -func (e AssertError) Error() string { - return "assertion failed: " + string(e) -} - // ErrorCode identifies a kind of error. type ErrorCode int @@ -294,7 +276,6 @@ func (e RuleError) Error() string { return e.Description } -// ruleError creates an RuleError given a set of arguments. -func ruleError(c ErrorCode, desc string) RuleError { - return RuleError{ErrorCode: c, Description: desc} +func ruleError(c ErrorCode, desc string) error { + return errors.WithStack(RuleError{ErrorCode: c, Description: desc}) } diff --git a/blockdag/error_test.go b/blockdag/error_test.go index 6fbfe3a8a..b1edcc168 100644 --- a/blockdag/error_test.go +++ b/blockdag/error_test.go @@ -5,7 +5,6 @@ package blockdag import ( - "fmt" "testing" ) @@ -99,46 +98,3 @@ func TestRuleError(t *testing.T) { } } } - -// TestDeploymentError tests the stringized output for the DeploymentError type. -func TestDeploymentError(t *testing.T) { - t.Parallel() - - tests := []struct { - in DeploymentError - want string - }{ - { - DeploymentError(0), - "deployment ID 0 does not exist", - }, - { - DeploymentError(10), - "deployment ID 10 does not exist", - }, - { - DeploymentError(123), - "deployment ID 123 does not exist", - }, - } - - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - result := test.in.Error() - if result != test.want { - t.Errorf("Error #%d\n got: %s want: %s", i, result, - test.want) - continue - } - } -} - -func TestAssertError(t *testing.T) { - message := "abc 123" - err := AssertError(message) - expectedMessage := fmt.Sprintf("assertion failed: %s", message) - if expectedMessage != err.Error() { - t.Errorf("Unexpected AssertError message. 
"+ - "Got: %s, want: %s", err.Error(), expectedMessage) - } -} diff --git a/blockdag/thresholdstate.go b/blockdag/thresholdstate.go index fbb45baae..506dc67ee 100644 --- a/blockdag/thresholdstate.go +++ b/blockdag/thresholdstate.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/kaspanet/kaspad/util/daghash" + "github.com/pkg/errors" ) // ThresholdState define the various threshold states used when voting on @@ -177,9 +178,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit var ok bool state, ok = cache.Lookup(prevNode.hash) if !ok { - return ThresholdFailed, AssertError(fmt.Sprintf( + return ThresholdFailed, errors.Errorf( "thresholdState: cache lookup failed for %s", - prevNode.hash)) + prevNode.hash) } } @@ -297,7 +298,7 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) { // This function MUST be called with the DAG state lock held (for writes). func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) { if deploymentID > uint32(len(dag.dagParams.Deployments)) { - return ThresholdFailed, DeploymentError(deploymentID) + return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID) } deployment := &dag.dagParams.Deployments[deploymentID] diff --git a/blockdag/validate_test.go b/blockdag/validate_test.go index 064387826..9b8a993bc 100644 --- a/blockdag/validate_test.go +++ b/blockdag/validate_test.go @@ -5,12 +5,13 @@ package blockdag import ( - "github.com/pkg/errors" "math" "path/filepath" "testing" "time" + "github.com/pkg/errors" + "github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util/daghash" diff --git a/kaspad.go b/kaspad.go index 607847404..7b3db8260 100644 --- a/kaspad.go +++ b/kaspad.go @@ -6,7 +6,6 @@ package main import ( "fmt" - "github.com/kaspanet/kaspad/dbaccess" _ "net/http/pprof" "os" "path/filepath" @@ -15,6 +14,8 @@ import ( "runtime/pprof" "strings" + "github.com/kaspanet/kaspad/dbaccess" + "github.com/kaspanet/kaspad/blockdag/indexers" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/limits" @@ -137,8 +138,7 @@ func kaspadMain(serverChan chan<- *server.Server) error { server, err := server.NewServer(cfg.Listeners, config.ActiveConfig().NetParams(), interrupt) if err != nil { - // TODO: this logging could do with some beautifying. - kasdLog.Errorf("Unable to start server on %s: %s", + kasdLog.Errorf("Unable to start server on %s: %+v", strings.Join(cfg.Listeners, ", "), err) return err } From 28681affdafef819f83671c426d8cb09bcaa09cc Mon Sep 17 00:00:00 2001 From: Svarog Date: Thu, 14 May 2020 10:58:46 +0300 Subject: [PATCH 28/77] [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720) * [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away * [NOD-994] Actually invcrease the log file * [NOD-994] Update comments * [NOD-994] Fix typo --- logger/logger.go | 2 +- logs/logs.go | 19 ++++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/logger/logger.go b/logger/logger.go index 06fe2431a..a53b6f264 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -113,7 +113,7 @@ var subsystemLoggers = map[string]*logs.Logger{ // InitLog attaches log file and error log file to the backend log. 
func InitLog(logFile, errLogFile string) { - err := BackendLog.AddLogFile(logFile, logs.LevelTrace) + err := BackendLog.AddLogFileWithCustomRotator(logFile, logs.LevelTrace, 100*1024, 4) if err != nil { fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err) os.Exit(1) diff --git a/logs/logs.go b/logs/logs.go index 7984031b3..e42f53022 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -35,7 +35,6 @@ package logs import ( "bytes" "fmt" - "github.com/pkg/errors" "os" "path/filepath" "runtime" @@ -44,6 +43,8 @@ import ( "sync/atomic" "time" + "github.com/pkg/errors" + "github.com/jrick/logrotate/rotator" ) @@ -265,15 +266,27 @@ func callsite(flag uint32) (string, int) { return file, line } +const ( + defaultThresholdKB = 10 * 1024 + defaultMaxRolls = 3 +) + // AddLogFile adds a file which the log will write into on a certain -// log level. It'll create the file if it doesn't exist. +// log level with the default log rotation settings. It'll create the file if it doesn't exist. func (b *Backend) AddLogFile(logFile string, logLevel Level) error { + return b.AddLogFileWithCustomRotator(logFile, logLevel, defaultThresholdKB, defaultMaxRolls) +} + +// AddLogFileWithCustomRotator adds a file which the log will write into on a certain +// log level, with the specified log rotation settings. +// It'll create the file if it doesn't exist. +func (b *Backend) AddLogFileWithCustomRotator(logFile string, logLevel Level, thresholdKB int64, maxRolls int) error { logDir, _ := filepath.Split(logFile) err := os.MkdirAll(logDir, 0700) if err != nil { return errors.Errorf("failed to create log directory: %s", err) } - r, err := rotator.New(logFile, 10*1024, false, 3) + r, err := rotator.New(logFile, thresholdKB, false, maxRolls) if err != nil { return errors.Errorf("failed to create file rotator: %s", err) } From eb8b8418502bfe0316a5e3b2149e0ead9b58cf6a Mon Sep 17 00:00:00 2001 From: Svarog Date: Mon, 18 May 2020 10:42:58 +0300 Subject: [PATCH 29/77] [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721) * [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent * [NOD-1005] Use sm.isSynced to check whether should request blocks from invs * [NOD-1005] Use private version of isSynced to avoid infinite loop * [NOD-1005] Fix a few typos --- netsync/manager.go | 34 +++++++++++++++++++------ server/rpc/handle_get_block_template.go | 31 +++++----------------- server/rpc/rpcadapters.go | 6 ++--- server/rpc/rpcserver.go | 4 +-- 4 files changed, 38 insertions(+), 37 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index d10e4e066..fc43d7fbe 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -103,10 +103,10 @@ type processBlockMsg struct { reply chan processBlockResponse } -// isCurrentMsg is a message type to be sent across the message channel for +// isSyncedMsg is a message type to be sent across the message channel for // requesting whether or not the sync manager believes it is synced with the // currently connected peers. 
-type isCurrentMsg struct { +type isSyncedMsg struct { reply chan bool } @@ -839,7 +839,7 @@ func (sm *SyncManager) sendInvsFromRequestQueue(peer *peerpkg.Peer, state *peerS if err != nil { return err } - if sm.current() { + if sm.syncPeer == nil || sm.isSynced() { err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) if err != nil { return err @@ -968,8 +968,8 @@ out: err: err, } - case isCurrentMsg: - msg.reply <- sm.current() + case isSyncedMsg: + msg.reply <- sm.isSynced() case pauseMsg: // Wait until the sender unpauses the manager. @@ -1151,14 +1151,32 @@ func (sm *SyncManager) ProcessBlock(block *util.Block, flags blockdag.BehaviorFl return response.isOrphan, response.err } -// IsCurrent returns whether or not the sync manager believes it is synced with +// IsSynced returns whether or not the sync manager believes it is synced with // the connected peers. -func (sm *SyncManager) IsCurrent() bool { +func (sm *SyncManager) IsSynced() bool { reply := make(chan bool) - sm.msgChan <- isCurrentMsg{reply: reply} + sm.msgChan <- isSyncedMsg{reply: reply} return <-reply } +// isSynced checks if the node is synced enough based upon its worldview. +// This is used to determine if the node can support mining and requesting newly-mined blocks. +// To do that, first it checks if the selected tip timestamp is not older than maxTipAge. If that's the case, it means +// the node is synced since blocks' timestamps are not allowed to deviate too much into the future. +// If that's not the case it checks the rate it added new blocks to the DAG recently. If it's faster than +// blockRate * maxSyncRateDeviation it means the node is not synced, since when the node is synced it shouldn't add +// blocks to the DAG faster than the block rate. +func (sm *SyncManager) isSynced() bool { + const maxTipAge = 5 * time.Minute + isCloseToCurrentTime := sm.dag.Now().Sub(sm.dag.SelectedTipHeader().Timestamp) <= maxTipAge + if isCloseToCurrentTime { + return true + } + + const maxSyncRateDeviation = 1.05 + return sm.dag.IsSyncRateBelowThreshold(maxSyncRateDeviation) +} + // Pause pauses the sync manager until the returned channel is closed. // // Note that while paused, all peer and block processing is halted. The diff --git a/server/rpc/handle_get_block_template.go b/server/rpc/handle_get_block_template.go index ac615a596..20385d382 100644 --- a/server/rpc/handle_get_block_template.go +++ b/server/rpc/handle_get_block_template.go @@ -4,6 +4,12 @@ import ( "bytes" "encoding/hex" "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "time" + "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/mining" @@ -14,11 +20,6 @@ import ( "github.com/kaspanet/kaspad/util/random" "github.com/kaspanet/kaspad/wire" "github.com/pkg/errors" - "math/rand" - "strconv" - "strings" - "sync" - "time" ) const ( @@ -119,24 +120,6 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ } } -// isSyncedForMining checks if the node is synced enough for mining blocks -// on top of its world view. -// To do that, first it checks if the selected tip timestamp is not older than maxTipAge. If that's the case, it means -// the node is synced since blocks' timestamps are not allowed to deviate too much into the future. -// If that's not the case it checks the rate it added new blocks to the DAG recently. 
If it's faster than -// blockRate * maxSyncRateDeviation it means the node is not synced, since when the node is synced it shouldn't add -// blocks to the DAG faster than the block rate. -func isSyncedForMining(s *Server) bool { - const maxTipAge = 5 * time.Minute - isCloseToCurrentTime := s.cfg.DAG.Now().Sub(s.cfg.DAG.SelectedTipHeader().Timestamp) <= maxTipAge - if isCloseToCurrentTime { - return true - } - - const maxSyncRateDeviation = 1.05 - return s.cfg.DAG.IsSyncRateBelowThreshold(maxSyncRateDeviation) -} - // handleGetBlockTemplateRequest is a helper for handleGetBlockTemplate which // deals with generating and returning block templates to the caller. It // handles both long poll requests as specified by BIP 0022 as well as regular @@ -652,7 +635,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) // This is not a straight-up error because the choice of whether // to mine or not is the responsibility of the miner rather // than the node's. - isSynced := isSyncedForMining(s) + isSynced := s.cfg.SyncMgr.IsSynced() // Update work state to ensure another block template isn't // generated until needed. diff --git a/server/rpc/rpcadapters.go b/server/rpc/rpcadapters.go index 506ccf82b..119e6be6a 100644 --- a/server/rpc/rpcadapters.go +++ b/server/rpc/rpcadapters.go @@ -234,13 +234,13 @@ type rpcSyncMgr struct { // Ensure rpcSyncMgr implements the rpcserverSyncManager interface. var _ rpcserverSyncManager = (*rpcSyncMgr)(nil) -// IsCurrent returns whether or not the sync manager believes the DAG is +// IsSynced returns whether or not the sync manager believes the DAG is // current as compared to the rest of the network. // // This function is safe for concurrent access and is part of the // rpcserverSyncManager interface implementation. -func (b *rpcSyncMgr) IsCurrent() bool { - return b.syncMgr.IsCurrent() +func (b *rpcSyncMgr) IsSynced() bool { + return b.syncMgr.IsSynced() } // SubmitBlock submits the provided block to the network after processing it diff --git a/server/rpc/rpcserver.go b/server/rpc/rpcserver.go index 90cb640a1..fe8920881 100644 --- a/server/rpc/rpcserver.go +++ b/server/rpc/rpcserver.go @@ -723,9 +723,9 @@ type rpcserverConnManager interface { // The interface contract requires that all of these methods are safe for // concurrent access. type rpcserverSyncManager interface { - // IsCurrent returns whether or not the sync manager believes the DAG + // IsSynced returns whether or not the sync manager believes the DAG // is current as compared to the rest of the network. - IsCurrent() bool + IsSynced() bool // SubmitBlock submits the provided block to the network after // processing it locally. From e9e1ef47721e7dcfb24626b357f1e3a6848202bc Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Tue, 19 May 2020 16:29:21 +0300 Subject: [PATCH 30/77] [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722) * [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget. * [NOD-1006] Fix merge errors. * [NOD-1006] Use CompactToBigWithDestination only in averageTarget. * [NOD-1006] Fix refactor errors. * [NOD-1006] Fix refactor errors. * [NOD-1006] Optimize averageTarget with a big.Int pool. * [NOD-1006] Defer releasing bigInts. * [NOD-1006] Use a pool for requiredDifficulty as well. * [NOD-1006] Move the big int pool to utils. * [NOD-1006] Remove unnecessary line. 
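
For readers who want the idiom in isolation, here is a minimal, self-contained
sketch of the sync.Pool plus out-parameter pattern this change applies. The
names (pool, acquire, release, square) are illustrative only and are not the
kaspad API; the real implementations are util/bigintpool and
CompactToBigWithDestination in the diff below.

package main

import (
	"fmt"
	"math/big"
	"sync"
)

// pool hands out reusable *big.Int values so that hot paths avoid
// allocating a fresh big.Int on every call.
var pool = sync.Pool{
	New: func() interface{} { return big.NewInt(0) },
}

// acquire takes a big.Int from the pool and initializes it to x.
func acquire(x int64) *big.Int {
	n := pool.Get().(*big.Int)
	n.SetInt64(x)
	return n
}

// release returns a big.Int to the pool for later reuse.
func release(n *big.Int) {
	pool.Put(n)
}

// square writes x*x into the caller-supplied destination instead of
// returning a newly allocated big.Int (the out-parameter style).
func square(x int64, destination *big.Int) {
	destination.SetInt64(x)
	destination.Mul(destination, destination)
}

func main() {
	result := acquire(0)
	defer release(result)

	square(12, result)
	fmt.Println(result) // prints 144
}

Passing the destination in lets a single pooled big.Int absorb what would
otherwise be one allocation per call, which is the point of the change.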
--- blockdag/blockwindow.go | 15 +++++++++++---- blockdag/difficulty.go | 19 ++++++++++++++----- util/bigintpool/pool.go | 25 +++++++++++++++++++++++++ util/math.go | 21 ++++++++++++++------- 4 files changed, 64 insertions(+), 16 deletions(-) create mode 100644 util/bigintpool/pool.go diff --git a/blockdag/blockwindow.go b/blockdag/blockwindow.go index 5d657d719..4e2420003 100644 --- a/blockdag/blockwindow.go +++ b/blockdag/blockwindow.go @@ -2,6 +2,7 @@ package blockdag import ( "github.com/kaspanet/kaspad/util" + "github.com/kaspanet/kaspad/util/bigintpool" "github.com/pkg/errors" "math" "math/big" @@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) { return } -func (window blockWindow) averageTarget() *big.Int { - averageTarget := big.NewInt(0) +func (window blockWindow) averageTarget(averageTarget *big.Int) { + averageTarget.SetInt64(0) + + target := bigintpool.Acquire(0) + defer bigintpool.Release(target) for _, node := range window { - target := util.CompactToBig(node.bits) + util.CompactToBigWithDestination(node.bits, target) averageTarget.Add(averageTarget, target) } - return averageTarget.Div(averageTarget, big.NewInt(int64(len(window)))) + + windowLen := bigintpool.Acquire(int64(len(window))) + defer bigintpool.Release(windowLen) + averageTarget.Div(averageTarget, windowLen) } func (window blockWindow) medianTimestamp() (int64, error) { diff --git a/blockdag/difficulty.go b/blockdag/difficulty.go index aab65f055..3aebd9279 100644 --- a/blockdag/difficulty.go +++ b/blockdag/difficulty.go @@ -5,7 +5,7 @@ package blockdag import ( - "math/big" + "github.com/kaspanet/kaspad/util/bigintpool" "time" "github.com/kaspanet/kaspad/util" @@ -30,11 +30,20 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti // averageWindowTarget * (windowMinTimestamp / (targetTimePerBlock * windowSize)) // The result uses integer division which means it will be slightly // rounded down. - newTarget := targetsWindow.averageTarget() + newTarget := bigintpool.Acquire(0) + defer bigintpool.Release(newTarget) + windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp) + defer bigintpool.Release(windowTimeStampDifference) + targetTimePerBlock := bigintpool.Acquire(dag.targetTimePerBlock) + defer bigintpool.Release(targetTimePerBlock) + difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize)) + defer bigintpool.Release(difficultyAdjustmentWindowSize) + + targetsWindow.averageTarget(newTarget) newTarget. - Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)). - Div(newTarget, big.NewInt(dag.targetTimePerBlock)). - Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize))) + Mul(newTarget, windowTimeStampDifference). + Div(newTarget, targetTimePerBlock). + Div(newTarget, difficultyAdjustmentWindowSize) if newTarget.Cmp(dag.dagParams.PowMax) > 0 { return dag.powMaxBits } diff --git a/util/bigintpool/pool.go b/util/bigintpool/pool.go new file mode 100644 index 000000000..d2eab80ff --- /dev/null +++ b/util/bigintpool/pool.go @@ -0,0 +1,25 @@ +package bigintpool + +import ( + "math/big" + "sync" +) + +var bigIntPool = sync.Pool{ + New: func() interface{} { + return big.NewInt(0) + }, +} + +// Acquire acquires a big.Int from the pool and +// initializes it to x. +func Acquire(x int64) *big.Int { + bigInt := bigIntPool.Get().(*big.Int) + bigInt.SetInt64(x) + return bigInt +} + +// Release returns the given big.Int to the pool. 
+func Release(toRelease *big.Int) { + bigIntPool.Put(toRelease) +} diff --git a/util/math.go b/util/math.go index c7a4e50b9..9b0bb9c55 100644 --- a/util/math.go +++ b/util/math.go @@ -53,6 +53,16 @@ func FastLog2Floor(n uint64) uint8 { // The formula to calculate N is: // N = (-1^sign) * mantissa * 256^(exponent-3) func CompactToBig(compact uint32) *big.Int { + destination := big.NewInt(0) + CompactToBigWithDestination(compact, destination) + return destination +} + +// CompactToBigWithDestination is a version of CompactToBig that +// takes a destination parameter. This is useful for saving memory, +// as then the destination big.Int can be reused. +// See CompactToBig for further details. +func CompactToBigWithDestination(compact uint32, destination *big.Int) { // Extract the mantissa, sign bit, and exponent. mantissa := compact & 0x007fffff isNegative := compact&0x00800000 != 0 @@ -63,21 +73,18 @@ func CompactToBig(compact uint32) *big.Int { // treat the exponent as the number of bytes and shift the mantissa // right or left accordingly. This is equivalent to: // N = mantissa * 256^(exponent-3) - var bn *big.Int if exponent <= 3 { mantissa >>= 8 * (3 - exponent) - bn = big.NewInt(int64(mantissa)) + destination.SetInt64(int64(mantissa)) } else { - bn = big.NewInt(int64(mantissa)) - bn.Lsh(bn, 8*(exponent-3)) + destination.SetInt64(int64(mantissa)) + destination.Lsh(destination, 8*(exponent-3)) } // Make it negative if the sign bit is set. if isNegative { - bn = bn.Neg(bn) + destination.Neg(destination) } - - return bn } // BigToCompact converts a whole number N to a compact representation using From e0f587f5993a281244d70db15a15a4cf2ae64164 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Tue, 19 May 2020 17:56:07 +0300 Subject: [PATCH 31/77] [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725) * [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8. * [NOD-877] Generate new test blocks. * [NOD-877] Fix TestIsKnownBlock. * [NOD-877] Fix TestBlueBlockWindow. * [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG. * [NOD-877] Fix TestVirtualBlock. 
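
For context on the format change in this patch: the old serialization packed
the coinbase flag into the lowest bit of a single eight-byte header code,
while the new serialization writes the blue score and a packed-flags byte as
two independent fields. The following is a simplified sketch of the two
encodings, with hypothetical helper names, covering only the leading bytes of
a serialized entry; the little-endian layout matches the updated
TestUTXOSerialization vectors in dagio_test.go below.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// tfCoinbase marks a coinbase output in the packed flags.
const tfCoinbase uint8 = 1

// encodeOld packs the coinbase flag into bit 0 of a single header code:
// headerCode = blueScore<<1 | coinbaseBit.
func encodeOld(blueScore uint64, isCoinbase bool) []byte {
	headerCode := blueScore << 1
	if isCoinbase {
		headerCode |= 1
	}
	buf := &bytes.Buffer{}
	binary.Write(buf, binary.LittleEndian, headerCode)
	return buf.Bytes()
}

// encodeNew writes the blue score and the packed flags as two separate
// fields, so flags can change without disturbing the blue score.
func encodeNew(blueScore uint64, packedFlags uint8) []byte {
	buf := &bytes.Buffer{}
	binary.Write(buf, binary.LittleEndian, blueScore)
	buf.WriteByte(packedFlags)
	return buf.Bytes()
}

func main() {
	// Blue score 100001, non-coinbase: the old prefix is 420d030000000000
	// (100001<<1), the new prefix is a18601000000000000 (the verbatim blue
	// score followed by a zero flags byte).
	fmt.Printf("old: %x\n", encodeOld(100001, false))
	fmt.Printf("new: %x\n", encodeNew(100001, 0))

	// Blue score 1, coinbase: old prefix 0300000000000000 (1<<1 | 1),
	// new prefix 010000000000000001.
	fmt.Printf("old coinbase: %x\n", encodeOld(1, true))
	fmt.Printf("new coinbase: %x\n", encodeNew(1, tfCoinbase))
}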
--- blockdag/blockwindow_test.go | 18 ++++++------- blockdag/dag_test.go | 2 +- blockdag/dagio_test.go | 4 +-- blockdag/ghostdag_test.go | 4 +-- blockdag/testdata/blk_0_to_4.dat | Bin 2055 -> 2055 bytes blockdag/testdata/blk_3A.dat | Bin 467 -> 467 bytes blockdag/testdata/blk_3B.dat | Bin 354 -> 354 bytes blockdag/testdata/blk_3C.dat | Bin 382 -> 382 bytes blockdag/testdata/blk_3D.dat | Bin 508 -> 508 bytes blockdag/utxoio.go | 45 +++++++++++-------------------- blockdag/virtualblock_test.go | 2 +- 11 files changed, 30 insertions(+), 45 deletions(-) diff --git a/blockdag/blockwindow_test.go b/blockdag/blockwindow_test.go index bb5de5026..939b84de5 100644 --- a/blockdag/blockwindow_test.go +++ b/blockdag/blockwindow_test.go @@ -53,12 +53,12 @@ func TestBlueBlockWindow(t *testing.T) { { parents: []string{"C", "D"}, id: "E", - expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, }, { parents: []string{"C", "D"}, id: "F", - expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, }, { parents: []string{"A"}, @@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) { { parents: []string{"H", "F"}, id: "I", - expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"}, }, { parents: []string{"I"}, id: "J", - expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"}, }, { parents: []string{"J"}, id: "K", - expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"}, }, { parents: []string{"K"}, id: "L", - expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"}, }, { parents: []string{"L"}, id: "M", - expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"}, }, { parents: []string{"M"}, id: "N", - expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"}, + expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"}, }, { parents: []string{"N"}, id: "O", - expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"}, + expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"}, }, } diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index 4a6b9dea6..a47abf757 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -204,7 +204,7 @@ func TestIsKnownBlock(t *testing.T) { {hash: dagconfig.SimnetParams.GenesisHash.String(), want: true}, // Block 3b should be present (as a second child of Block 2). - {hash: "216301e3fc03cf89973b9192b4ecdd732bf3b677cf1ca4f6c340a56f1533fb4f", want: true}, + {hash: "7f2bea5aa4122aed2a542447133e73da6b6f6190ec34c061be70d4576cdd7498", want: true}, // Block 100000 should be present (as an orphan). 
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true}, diff --git a/blockdag/dagio_test.go b/blockdag/dagio_test.go index b1522da07..ac744548f 100644 --- a/blockdag/dagio_test.go +++ b/blockdag/dagio_test.go @@ -66,7 +66,7 @@ func TestUTXOSerialization(t *testing.T) { blockBlueScore: 1, packedFlags: tfCoinbase, }, - serialized: hexToBytes("030000000000000000f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"), + serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"), }, { name: "blue score 100001, not coinbase", @@ -76,7 +76,7 @@ func TestUTXOSerialization(t *testing.T) { blockBlueScore: 100001, packedFlags: 0, }, - serialized: hexToBytes("420d03000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"), + serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"), }, } diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 920bff7fd..1770975a7 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) { }{ { k: 3, - expectedReds: []string{"F", "G", "H", "I", "N", "Q"}, + expectedReds: []string{"F", "G", "H", "I", "O", "Q"}, dagData: []*testBlockData{ { parents: []string{"A"}, @@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) { id: "T", expectedScore: 13, expectedSelectedParent: "S", - expectedBlues: []string{"S", "O", "P"}, + expectedBlues: []string{"S", "P", "N"}, }, }, }, diff --git a/blockdag/testdata/blk_0_to_4.dat b/blockdag/testdata/blk_0_to_4.dat index 3352f2024d3855230d214bf64293e23650a203a9..c539d6e734f0999821da7044b3ced818dcdea4f7 100644 GIT binary patch delta 679 zcmZn{XcyQplTo*3qxmd-YK3VRr^@U@`PpBM~}(JbzeJb$X*tWV*rBx z|LYZ4CLd$G>TuZP%F+FwmYtxtiW(FKmH6DPU8%aD>vWPDpCT)XAF zwc@XC_r)T)AHtQkDZjmOW&eiPJl|{Fbtd0tQc!r5a;r)1Hvg_y&!jKL9Z>S#$~rlz z$a04un~aQd+?UKLlckxZ6bRWg`8Qk8!l95Fn{+sI6wa5y8We68-9vEUT{Vr zk9p2_mkG|Z?70Llze$;O)oO?Tx1!xj`G;-@=oOxbeek8}>$+g}<=@oQ!Y=JK5Q=SF z{7d$IxWR}2K9>({e|d+GfALwRvbrge`zv%=rRPM+{CZPl^}0-B7q6XON`{}Re%H1A z2bRwAcF106ohpwMN=%^O0s(@GX*w3h@YOj=;d5Cy9r-97jKbwym9MtX!X;WV>2?=vdFqg6!%Vf z`P|QH`X1?)KO)~;X3aXW^x|>0D-WLcrC1zrJ1CK}TE|TJ*rV_4v+r88=+t`Ld*XVh z+%JDxhRV_>|6VKL^fe);0e!tdAvs$G}c}~YQvX@2U7=Yma z|9S<6$;TM4I=t9_QglMg5;@1R>orp+OEXI;5VC3VZ?>Sx%bAbY+ca6+b1zu4y;VKGHgnp}#UVHEJd?iq zVhM+}!T0}qS2!<6o2veQC&hKk_+inlRR8eiLo?sN_8V!6Hy|-=8ZlQs*D>*eo1B@y;VfS))xC zymxVFPB?7+OCBkdm_fk>1Wb%Tf{~EZz<#J=^;1~SsxP5zu#`Xi!fjQeDQyv)8ddj8Ot)ZM&0>RRUZ=&IS~ z>P|T>6TI$O-7@%a#dp=gKg%a1@bIL_nW%VA{QK&vjjLMe*$BO}ZU*bOD8A}%@Mh;< znASb7V7K(2HB2`zneNnPKiu@~evoZhEw6UuO72JNedL_)$o_a)c}yT{p~qL>j6%(~JxtAiyYf(1rQC&%yceAJ^?KmD=!A{PBV_0(s1HzPn6to@LJ^ zxOc9^2jP3xRyL(G&eXew)U#g9p87pq^~vq8Z>CO|6LbI4UIU@n#>Ky6--jD~`0sQ1 z!1kAS`1lu}RVu5S61l%Z_hjJF#HsrPZ?2GBy84ID3)49}_}({5Xgz(~JNKlu<4VTB ziM>*)gdFi72pA^!F&a+%o2Bq1%vfZ4%(~JxtAi!wTWO2{EV9oYc_59k*X*(B(+`RKl`s#}%9M%Tk|La}h zyf9JjhSat@OK%+Yk7C@Ic(;2$>;BBJpbNem;(k^3oU-56_0%=%C`JYl5MY!#=)(Nn=ivPKkL&iAN^STl{&>L|fjs6p-(4m+&$8zd z)KEGy+tMM+eYnAg|2~%w zY=3!&kALx5rLwvyk^3ujTfIYn%=ztVH1}oV&vRFo@@u`~yWa0~>2AZ?%j*{HPrbQz OVy~0}AxBI+J{thN09&H~ delta 186 zcmaFF^oXfm_WK>%C`JYl5MZ=vvbg77ux5L!dVX!@w4IAXZr*t&ef7l>4r_z&|MjkL zMw&i7U~ic)>EPX*E2SmQVr`$*Cro45{(8@W@O4sZ!Q$Jxp1Nk8?7#HZkyI zu~&%uw%$W}Sr@;FwtG}E@P{%wvgBBs&Ja0&+J}2xp%mW*apvL`y_*vcL?4`V@4>`g 
MDFs4~n0S0P04v8;n*aa+ diff --git a/blockdag/testdata/blk_3C.dat b/blockdag/testdata/blk_3C.dat index d1915575f9f13b8ef2a7de6762945275082643a8..c9fc086c3e73c082d59c7738e95d8b791900992e 100644 GIT binary patch delta 193 zcmV;y06zcz0{#Mj9rxTlb^!nY0000G0vf?Y1NTh9pI`B=zjYe0`4{7$%n)t^ocBbK zLz+Jd5yLdp#lP~W&l-VzPW?3GA^Uwya4|B_2-T62Meb-C8W2>SdFPVjm${jfwLtE% zSdO6pPf2!Yt^L#96qk#$W=h9!+Z;FEz*nL$@_aXl)lt?Cix<41bLC&y7e__3roNGp v9Fd>|k*#GQ3pp8x%j^AS=##YkokJjr71H`T;t&ZsQ?rZGKOVGX!=05S{|Lp3f$&3H>^gEG} v9Fd>|k*#GQuLUm{BQT{8SMaR=)y4J;@lt2T=C}F`Dm7g!95s^DlQIEJzY<|< diff --git a/blockdag/testdata/blk_3D.dat b/blockdag/testdata/blk_3D.dat index 276fd0aa16e0a63367a8b184fbcd076a77853f36..e857dd182bd6a2197d8b0df490629b262dddc527 100644 GIT binary patch delta 254 zcmeyv{D(PL_WK>%FN_QzAiyYf(1rQC&%yceAJ^?KmD=!A{PBV_0(s1HzPn6to@LJ^ z7!zmZVEB*4({Q5B7L6Pawo@$L$8<`<#4gPU-ZJxZ#Cose1-E6)?;QwVWbmoRyyI%% zwQlh}3yYt|-w=0q-Lh=o#6p>g4J;FTc^DZc&WccY5@swiJ@VJ1g8`H3n2Xk~zU-WL o(O~WG2!UGm^w8+gos)$a149TY0=b-tG4MYW5Tl%7vLvG`0MW~E-T(jq delta 254 zcmeyv{D(PL_WK>%FN_QzAi!wTWO2{EV9oYc_59k*X*(B(+`RKl`s#}%9M%Tk|La}h zJhrvu#6>xECy5)g`_^V$n-?cweMr)A$?vkMnL1uma(S=4=3+jwPJM0Pi!bl8-<{_qcL{$D-%jr(Iz q`mtx*fAMIU#_GtJPP#l> 1 + // Decode the packedFlags. + packedFlags, err := binaryserializer.Uint8(r) + if err != nil { + return nil, err + } entry := &UTXOEntry{ blockBlueScore: blockBlueScore, - packedFlags: 0, - } - - if isCoinbase { - entry.packedFlags |= tfCoinbase + packedFlags: txoFlags(packedFlags), } entry.amount, err = binaryserializer.Uint64(r, byteOrder) diff --git a/blockdag/virtualblock_test.go b/blockdag/virtualblock_test.go index fbe414b1d..1ee543c6f 100644 --- a/blockdag/virtualblock_test.go +++ b/blockdag/virtualblock_test.go @@ -97,7 +97,7 @@ func TestVirtualBlock(t *testing.T) { tipsToSet: []*blockNode{}, tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6}, expectedTips: blockSetFromSlice(node2, node5, node6), - expectedSelectedParent: node5, + expectedSelectedParent: node6, }, } From fe25ea3d8c11f3053c8e3632442a5032bdd81ac6 Mon Sep 17 00:00:00 2001 From: Svarog Date: Wed, 20 May 2020 10:36:44 +0300 Subject: [PATCH 32/77] [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. 
(#723) * [NOD-1001] Move side-effects of connection out of OnVersion * [NOD-1001] Make AssociateConnection synchronous * [NOD-1001] Wait for 2 veracks in TestPeerListeners * [NOD-1001] Made AssociateConnection return error * [NOD-1001] Remove temporary logs * [NOD-1001] Fix typos and find-and-replace errors * [NOD-1001] Move example_test back out of peer package + fix some typos * [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals * [NOD-1001] Use separate verack channels for inPeer and outPeer * [NOD-1001] Make verack channels buffered * [NOD-1001] Removed temporary sleep of 1 second * [NOD-1001] Removed redundant // --- peer/example_test.go | 23 ++- peer/peer.go | 23 +-- peer/peer_test.go | 270 ++++++++++++++++++++---------------- server/p2p/on_version.go | 6 - server/p2p/p2p.go | 29 ++-- util/testtools/testtools.go | 20 +++ 6 files changed, 225 insertions(+), 146 deletions(-) diff --git a/peer/example_test.go b/peer/example_test.go index 715e50269..8bcbc425a 100644 --- a/peer/example_test.go +++ b/peer/example_test.go @@ -9,11 +9,18 @@ import ( "net" "time" - "github.com/kaspanet/kaspad/dagconfig" + "github.com/kaspanet/kaspad/util/daghash" + "github.com/kaspanet/kaspad/peer" + + "github.com/kaspanet/kaspad/dagconfig" "github.com/kaspanet/kaspad/wire" ) +func fakeSelectedTipFn() *daghash.Hash { + return &daghash.Hash{0x12, 0x34} +} + // mockRemotePeer creates a basic inbound peer listening on the simnet port for // use with Example_peerConnection. It does not return until the listner is // active. @@ -40,7 +47,11 @@ func mockRemotePeer() error { // Create and start the inbound peer. p := peer.NewInboundPeer(peerCfg) - p.AssociateConnection(conn) + err = p.AssociateConnection(conn) + if err != nil { + fmt.Printf("AssociateConnection: error %+v\n", err) + return + } }() return nil @@ -89,10 +100,14 @@ func Example_newOutboundPeer() { // Establish the connection to the peer address and mark it connected. conn, err := net.Dial("tcp", p.Addr()) if err != nil { - fmt.Printf("net.Dial: error %v\n", err) + fmt.Printf("net.Dial: error %+v\n", err) + return + } + err = p.AssociateConnection(conn) + if err != nil { + fmt.Printf("AssociateConnection: error %+v\n", err) return } - p.AssociateConnection(conn) // Wait for the verack message or timeout in case of failure. select { diff --git a/peer/peer.go b/peer/peer.go index 13f705871..fe23d1d4a 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -8,7 +8,6 @@ import ( "bytes" "container/list" "fmt" - "github.com/pkg/errors" "io" "math/rand" "net" @@ -17,6 +16,8 @@ import ( "sync/atomic" "time" + "github.com/pkg/errors" + "github.com/kaspanet/kaspad/util/random" "github.com/kaspanet/kaspad/util/subnetworkid" @@ -1752,10 +1753,10 @@ func (p *Peer) QueueInventory(invVect *wire.InvVect) { // AssociateConnection associates the given conn to the peer. Calling this // function when the peer is already connected will have no effect. -func (p *Peer) AssociateConnection(conn net.Conn) { +func (p *Peer) AssociateConnection(conn net.Conn) error { // Already connected? if !atomic.CompareAndSwapInt32(&p.connected, 0, 1) { - return + return nil } p.conn = conn @@ -1769,19 +1770,18 @@ func (p *Peer) AssociateConnection(conn net.Conn) { // and no point recomputing. 
na, err := newNetAddress(p.conn.RemoteAddr(), p.services) if err != nil { - log.Errorf("Cannot create remote net address: %s", err) p.Disconnect() - return + return errors.Wrap(err, "Cannot create remote net address") } p.na = na } - spawn(func() { - if err := p.start(); err != nil { - log.Debugf("Cannot start peer %s: %s", p, err) - p.Disconnect() - } - }) + if err := p.start(); err != nil { + p.Disconnect() + return errors.Wrapf(err, "Cannot start peer %s", p) + } + + return nil } // Connected returns whether or not the peer is currently connected. @@ -1841,6 +1841,7 @@ func (p *Peer) start() error { // Send our verack message now that the IO processing machinery has started. p.QueueMessage(wire.NewMsgVerAck(), nil) + return nil } diff --git a/peer/peer_test.go b/peer/peer_test.go index 29a098f11..fbc8942c0 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -2,20 +2,22 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package peer_test +package peer import ( "io" "net" "strconv" + "strings" "testing" "time" + "github.com/kaspanet/kaspad/util/testtools" + "github.com/pkg/errors" "github.com/btcsuite/go-socks/socks" "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/peer" "github.com/kaspanet/kaspad/util/daghash" "github.com/kaspanet/kaspad/wire" ) @@ -110,7 +112,7 @@ type peerStats struct { } // testPeer tests the given peer's flags and stats -func testPeer(t *testing.T, p *peer.Peer, s peerStats) { +func testPeer(t *testing.T, p *Peer, s peerStats) { if p.UserAgent() != s.wantUserAgent { t.Errorf("testPeer: wrong UserAgent - got %v, want %v", p.UserAgent(), s.wantUserAgent) return @@ -199,16 +201,18 @@ func testPeer(t *testing.T, p *peer.Peer, s peerStats) { // TestPeerConnection tests connection between inbound and outbound peers. 
func TestPeerConnection(t *testing.T) { - verack := make(chan struct{}) - peer1Cfg := &peer.Config{ - Listeners: peer.MessageListeners{ - OnVerAck: func(p *peer.Peer, msg *wire.MsgVerAck) { - verack <- struct{}{} + inPeerVerack, outPeerVerack, inPeerOnWriteVerack, outPeerOnWriteVerack := + make(chan struct{}, 1), make(chan struct{}, 1), make(chan struct{}, 1), make(chan struct{}, 1) + + inPeerCfg := &Config{ + Listeners: MessageListeners{ + OnVerAck: func(p *Peer, msg *wire.MsgVerAck) { + inPeerVerack <- struct{}{} }, - OnWrite: func(p *peer.Peer, bytesWritten int, msg wire.Message, + OnWrite: func(p *Peer, bytesWritten int, msg wire.Message, err error) { if _, ok := msg.(*wire.MsgVerAck); ok { - verack <- struct{}{} + inPeerOnWriteVerack <- struct{}{} } }, }, @@ -220,8 +224,18 @@ func TestPeerConnection(t *testing.T) { Services: 0, SelectedTipHash: fakeSelectedTipFn, } - peer2Cfg := &peer.Config{ - Listeners: peer1Cfg.Listeners, + outPeerCfg := &Config{ + Listeners: MessageListeners{ + OnVerAck: func(p *Peer, msg *wire.MsgVerAck) { + outPeerVerack <- struct{}{} + }, + OnWrite: func(p *Peer, bytesWritten int, msg wire.Message, + err error) { + if _, ok := msg.(*wire.MsgVerAck); ok { + outPeerOnWriteVerack <- struct{}{} + } + }, + }, UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, @@ -262,56 +276,42 @@ func TestPeerConnection(t *testing.T) { tests := []struct { name string - setup func() (*peer.Peer, *peer.Peer, error) + setup func() (*Peer, *Peer, error) }{ { "basic handshake", - func() (*peer.Peer, *peer.Peer, error) { - inConn, outConn := pipe( - &conn{raddr: "10.0.0.1:16111"}, - &conn{raddr: "10.0.0.2:16111"}, - ) - inPeer := peer.NewInboundPeer(peer1Cfg) - inPeer.AssociateConnection(inConn) - - outPeer, err := peer.NewOutboundPeer(peer2Cfg, "10.0.0.2:16111") + func() (*Peer, *Peer, error) { + inPeer, outPeer, err := setupPeers(inPeerCfg, outPeerCfg) if err != nil { return nil, nil, err } - outPeer.AssociateConnection(outConn) - for i := 0; i < 4; i++ { - select { - case <-verack: - case <-time.After(time.Second): - return nil, nil, errors.New("verack timeout") - } + // wait for 4 veracks + if !testtools.WaitTillAllCompleteOrTimeout(time.Second, + inPeerVerack, inPeerOnWriteVerack, outPeerVerack, outPeerOnWriteVerack) { + + return nil, nil, errors.New("handshake timeout") } return inPeer, outPeer, nil }, }, { "socks proxy", - func() (*peer.Peer, *peer.Peer, error) { + func() (*Peer, *Peer, error) { inConn, outConn := pipe( &conn{raddr: "10.0.0.1:16111", proxy: true}, &conn{raddr: "10.0.0.2:16111"}, ) - inPeer := peer.NewInboundPeer(peer1Cfg) - inPeer.AssociateConnection(inConn) - - outPeer, err := peer.NewOutboundPeer(peer2Cfg, "10.0.0.2:16111") + inPeer, outPeer, err := setupPeersWithConns(inPeerCfg, outPeerCfg, inConn, outConn) if err != nil { return nil, nil, err } - outPeer.AssociateConnection(outConn) - for i := 0; i < 4; i++ { - select { - case <-verack: - case <-time.After(time.Second): - return nil, nil, errors.New("verack timeout") - } + // wait for 4 veracks + if !testtools.WaitTillAllCompleteOrTimeout(time.Second, + inPeerVerack, inPeerOnWriteVerack, outPeerVerack, outPeerOnWriteVerack) { + + return nil, nil, errors.New("handshake timeout") } return inPeer, outPeer, nil }, @@ -336,62 +336,62 @@ func TestPeerConnection(t *testing.T) { // TestPeerListeners tests that the peer listeners are called as expected. 
func TestPeerListeners(t *testing.T) { - verack := make(chan struct{}, 1) + inPeerVerack, outPeerVerack := make(chan struct{}, 1), make(chan struct{}, 1) ok := make(chan wire.Message, 20) - peerCfg := &peer.Config{ - Listeners: peer.MessageListeners{ - OnGetAddr: func(p *peer.Peer, msg *wire.MsgGetAddr) { + inPeerCfg := &Config{ + Listeners: MessageListeners{ + OnGetAddr: func(p *Peer, msg *wire.MsgGetAddr) { ok <- msg }, - OnAddr: func(p *peer.Peer, msg *wire.MsgAddr) { + OnAddr: func(p *Peer, msg *wire.MsgAddr) { ok <- msg }, - OnPing: func(p *peer.Peer, msg *wire.MsgPing) { + OnPing: func(p *Peer, msg *wire.MsgPing) { ok <- msg }, - OnPong: func(p *peer.Peer, msg *wire.MsgPong) { + OnPong: func(p *Peer, msg *wire.MsgPong) { ok <- msg }, - OnTx: func(p *peer.Peer, msg *wire.MsgTx) { + OnTx: func(p *Peer, msg *wire.MsgTx) { ok <- msg }, - OnBlock: func(p *peer.Peer, msg *wire.MsgBlock, buf []byte) { + OnBlock: func(p *Peer, msg *wire.MsgBlock, buf []byte) { ok <- msg }, - OnInv: func(p *peer.Peer, msg *wire.MsgInv) { + OnInv: func(p *Peer, msg *wire.MsgInv) { ok <- msg }, - OnNotFound: func(p *peer.Peer, msg *wire.MsgNotFound) { + OnNotFound: func(p *Peer, msg *wire.MsgNotFound) { ok <- msg }, - OnGetData: func(p *peer.Peer, msg *wire.MsgGetData) { + OnGetData: func(p *Peer, msg *wire.MsgGetData) { ok <- msg }, - OnGetBlockInvs: func(p *peer.Peer, msg *wire.MsgGetBlockInvs) { + OnGetBlockInvs: func(p *Peer, msg *wire.MsgGetBlockInvs) { ok <- msg }, - OnFeeFilter: func(p *peer.Peer, msg *wire.MsgFeeFilter) { + OnFeeFilter: func(p *Peer, msg *wire.MsgFeeFilter) { ok <- msg }, - OnFilterAdd: func(p *peer.Peer, msg *wire.MsgFilterAdd) { + OnFilterAdd: func(p *Peer, msg *wire.MsgFilterAdd) { ok <- msg }, - OnFilterClear: func(p *peer.Peer, msg *wire.MsgFilterClear) { + OnFilterClear: func(p *Peer, msg *wire.MsgFilterClear) { ok <- msg }, - OnFilterLoad: func(p *peer.Peer, msg *wire.MsgFilterLoad) { + OnFilterLoad: func(p *Peer, msg *wire.MsgFilterLoad) { ok <- msg }, - OnMerkleBlock: func(p *peer.Peer, msg *wire.MsgMerkleBlock) { + OnMerkleBlock: func(p *Peer, msg *wire.MsgMerkleBlock) { ok <- msg }, - OnVersion: func(p *peer.Peer, msg *wire.MsgVersion) { + OnVersion: func(p *Peer, msg *wire.MsgVersion) { ok <- msg }, - OnVerAck: func(p *peer.Peer, msg *wire.MsgVerAck) { - verack <- struct{}{} + OnVerAck: func(p *Peer, msg *wire.MsgVerAck) { + inPeerVerack <- struct{}{} }, - OnReject: func(p *peer.Peer, msg *wire.MsgReject) { + OnReject: func(p *Peer, msg *wire.MsgReject) { ok <- msg }, }, @@ -402,32 +402,20 @@ func TestPeerListeners(t *testing.T) { Services: wire.SFNodeBloom, SelectedTipHash: fakeSelectedTipFn, } - inConn, outConn := pipe( - &conn{raddr: "10.0.0.1:16111"}, - &conn{raddr: "10.0.0.2:16111"}, - ) - inPeer := peer.NewInboundPeer(peerCfg) - inPeer.AssociateConnection(inConn) - peerCfg.Listeners = peer.MessageListeners{ - OnVerAck: func(p *peer.Peer, msg *wire.MsgVerAck) { - verack <- struct{}{} - }, + outPeerCfg := &Config{} + *outPeerCfg = *inPeerCfg // copy inPeerCfg + outPeerCfg.Listeners.OnVerAck = func(p *Peer, msg *wire.MsgVerAck) { + outPeerVerack <- struct{}{} } - outPeer, err := peer.NewOutboundPeer(peerCfg, "10.0.0.1:16111") + + inPeer, outPeer, err := setupPeers(inPeerCfg, outPeerCfg) if err != nil { - t.Errorf("NewOutboundPeer: unexpected err %v\n", err) - return + t.Errorf("TestPeerListeners: %v", err) } - outPeer.AssociateConnection(outConn) - - for i := 0; i < 2; i++ { - select { - case <-verack: - case <-time.After(time.Second * 1): - t.Errorf("TestPeerListeners: 
verack timeout\n")
-			return
-		}
+	// wait for 2 veracks
+	if !testtools.WaitTillAllCompleteOrTimeout(time.Second, inPeerVerack, outPeerVerack) {
+		t.Errorf("TestPeerListeners: Timeout waiting for veracks")
 	}
 
 	tests := []struct {
@@ -520,7 +508,7 @@ func TestPeerListeners(t *testing.T) {
 
 // TestOutboundPeer tests that the outbound peer works as expected.
 func TestOutboundPeer(t *testing.T) {
-	peerCfg := &peer.Config{
+	peerCfg := &Config{
 		SelectedTipHash: func() *daghash.Hash {
 			return &daghash.ZeroHash
 		},
@@ -531,18 +519,16 @@ func TestOutboundPeer(t *testing.T) {
 		Services: 0,
 	}
 
-	r, w := io.Pipe()
-	c := &conn{raddr: "10.0.0.1:16111", Writer: w, Reader: r}
-
-	p, err := peer.NewOutboundPeer(peerCfg, "10.0.0.1:16111")
+	_, p, err := setupPeers(peerCfg, peerCfg)
 	if err != nil {
-		t.Errorf("NewOutboundPeer: unexpected err - %v\n", err)
-		return
+		t.Fatalf("TestOutboundPeer: unexpected err in setupPeers - %v\n", err)
 	}
 
-	// Test trying to connect twice.
-	p.AssociateConnection(c)
-	p.AssociateConnection(c)
+	// Test trying to connect for a second time and make sure nothing happens.
+	err = p.AssociateConnection(p.conn)
+	if err != nil {
+		t.Fatalf("AssociateConnection for the second time didn't return nil")
+	}
 	p.Disconnect()
 
 	// Test Queue Inv
@@ -572,14 +558,11 @@ func TestOutboundPeer(t *testing.T) {
 	}
 	peerCfg.SelectedTipHash = selectedTipHash
-	r1, w1 := io.Pipe()
-	c1 := &conn{raddr: "10.0.0.1:16111", Writer: w1, Reader: r1}
-	p1, err := peer.NewOutboundPeer(peerCfg, "10.0.0.1:16111")
+
+	_, p1, err := setupPeers(peerCfg, peerCfg)
 	if err != nil {
-		t.Errorf("NewOutboundPeer: unexpected err - %v\n", err)
-		return
+		t.Fatalf("TestOutboundPeer: unexpected err in setupPeers - %v\n", err)
 	}
-	p1.AssociateConnection(c1)
 
 	// Test Queue Inv after connection
 	p1.QueueInventory(fakeInv)
@@ -588,14 +571,10 @@ func TestOutboundPeer(t *testing.T) {
 	// Test regression
 	peerCfg.DAGParams = &dagconfig.RegressionNetParams
 	peerCfg.Services = wire.SFNodeBloom
-	r2, w2 := io.Pipe()
-	c2 := &conn{raddr: "10.0.0.1:16111", Writer: w2, Reader: r2}
-	p2, err := peer.NewOutboundPeer(peerCfg, "10.0.0.1:16111")
+	_, p2, err := setupPeers(peerCfg, peerCfg)
 	if err != nil {
-		t.Errorf("NewOutboundPeer: unexpected err - %v\n", err)
-		return
+		t.Fatalf("setupPeers: unexpected err - %v\n", err)
 	}
-	p2.AssociateConnection(c2)
 
 	// Test PushXXX
 	var addrs []*wire.NetAddress
@@ -604,12 +583,10 @@ func TestOutboundPeer(t *testing.T) {
 		addrs = append(addrs, &na)
 	}
 	if _, err := p2.PushAddrMsg(addrs, nil); err != nil {
-		t.Errorf("PushAddrMsg: unexpected err %v\n", err)
-		return
+		t.Fatalf("PushAddrMsg: unexpected err %v\n", err)
 	}
 	if err := p2.PushGetBlockInvsMsg(nil, &daghash.Hash{}); err != nil {
-		t.Errorf("PushGetBlockInvsMsg: unexpected err %v\n", err)
-		return
+		t.Fatalf("PushGetBlockInvsMsg: unexpected err %v\n", err)
 	}
 	p2.PushRejectMsg("block", wire.RejectMalformed, "malformed", nil, false)
@@ -627,7 +604,7 @@ func TestOutboundPeer(t *testing.T) {
 
 // Tests that the node disconnects from peers with an unsupported protocol
 // version.
func TestUnsupportedVersionPeer(t *testing.T) { - peerCfg := &peer.Config{ + peerCfg := &Config{ UserAgentName: "peer", UserAgentVersion: "1.0", UserAgentComments: []string{"comment"}, @@ -637,12 +614,12 @@ func TestUnsupportedVersionPeer(t *testing.T) { } localNA := wire.NewNetAddressIPPort( - net.ParseIP("10.0.0.1"), + net.ParseIP("10.0.0.1:16111"), uint16(16111), wire.SFNodeNetwork, ) remoteNA := wire.NewNetAddressIPPort( - net.ParseIP("10.0.0.2"), + net.ParseIP("10.0.0.2:16111"), uint16(16111), wire.SFNodeNetwork, ) @@ -651,11 +628,23 @@ func TestUnsupportedVersionPeer(t *testing.T) { &conn{laddr: "10.0.0.2:16111", raddr: "10.0.0.1:16111"}, ) - p, err := peer.NewOutboundPeer(peerCfg, "10.0.0.1:16111") + p, err := NewOutboundPeer(peerCfg, "10.0.0.1:16111") if err != nil { t.Fatalf("NewOutboundPeer: unexpected err - %v\n", err) } - p.AssociateConnection(localConn) + + go func() { + err := p.AssociateConnection(localConn) + wantErrorMessage := "protocol version must be 1 or greater" + if err == nil { + t.Fatalf("No error from AssociateConnection to invalid protocol version") + } + gotErrorMessage := err.Error() + if !strings.Contains(gotErrorMessage, wantErrorMessage) { + t.Fatalf("Wrong error message from AssociateConnection to invalid protocol version.\nWant: '%s'\nGot: '%s'", + wantErrorMessage, gotErrorMessage) + } + }() // Read outbound messages to peer into a channel outboundMessages := make(chan wire.Message) @@ -730,9 +719,56 @@ func TestUnsupportedVersionPeer(t *testing.T) { func init() { // Allow self connection when running the tests. - peer.TstAllowSelfConns() + TstAllowSelfConns() } func fakeSelectedTipFn() *daghash.Hash { return &daghash.Hash{0x12, 0x34} } + +func setupPeers(inPeerCfg, outPeerCfg *Config) (inPeer *Peer, outPeer *Peer, err error) { + inConn, outConn := pipe( + &conn{raddr: "10.0.0.1:16111"}, + &conn{raddr: "10.0.0.2:16111"}, + ) + return setupPeersWithConns(inPeerCfg, outPeerCfg, inConn, outConn) +} + +func setupPeersWithConns(inPeerCfg, outPeerCfg *Config, inConn, outConn *conn) (inPeer *Peer, outPeer *Peer, err error) { + inPeer = NewInboundPeer(inPeerCfg) + inPeerDone := make(chan struct{}) + var inPeerErr error + go func() { + inPeerErr = inPeer.AssociateConnection(inConn) + inPeerDone <- struct{}{} + }() + + outPeer, err = NewOutboundPeer(outPeerCfg, outConn.raddr) + if err != nil { + return nil, nil, err + } + outPeerDone := make(chan struct{}) + var outPeerErr error + go func() { + outPeerErr = outPeer.AssociateConnection(outConn) + outPeerDone <- struct{}{} + }() + + // wait for AssociateConnection to complete in all instances + if !testtools.WaitTillAllCompleteOrTimeout(2*time.Second, inPeerDone, outPeerDone) { + return nil, nil, errors.New("handshake timeout") + } + + if inPeerErr != nil && outPeerErr != nil { + return nil, nil, errors.Errorf("both inPeer and outPeer failed connecting: \nInPeer: %+v\nOutPeer: %+v", + inPeerErr, outPeerErr) + } + if inPeerErr != nil { + return nil, nil, inPeerErr + } + if outPeerErr != nil { + return nil, nil, outPeerErr + } + + return inPeer, outPeer, nil +} diff --git a/server/p2p/on_version.go b/server/p2p/on_version.go index 68ee83fc5..0a553e3d0 100644 --- a/server/p2p/on_version.go +++ b/server/p2p/on_version.go @@ -11,9 +11,6 @@ import ( // and is used to negotiate the protocol version details as well as kick start // the communications. func (sp *Peer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) { - // Signal the sync manager this peer is a new sync candidate. 
-	sp.server.SyncManager.NewPeer(sp.Peer)
-
 	// Choose whether or not to relay transactions before a filter command
 	// is received.
 	sp.setDisableRelayTx(msg.DisableRelayTx)
@@ -54,7 +51,4 @@ func (sp *Peer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) {
 			addrManager.Good(sp.NA(), msg.SubnetworkID)
 		}
 	}
-
-	// Add valid peer to the server.
-	sp.server.AddPeer(sp)
 }
diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go
index 9bdc42fc0..4bf5cb03e 100644
--- a/server/p2p/p2p.go
+++ b/server/p2p/p2p.go
@@ -967,12 +967,9 @@ func newPeerConfig(sp *Peer) *peer.Config {
 // for disconnection.
 func (s *Server) inboundPeerConnected(conn net.Conn) {
 	sp := newServerPeer(s, false)
-	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
 	sp.Peer = peer.NewInboundPeer(newPeerConfig(sp))
-	sp.AssociateConnection(conn)
-	spawn(func() {
-		s.peerDoneHandler(sp)
-	})
+
+	s.peerConnected(sp, conn)
 }
 
 // outboundPeerConnected is invoked by the connection manager when a new
@@ -989,12 +986,28 @@ func (s *Server) outboundPeerConnected(state *peerState, msg *outboundPeerConnec
 	}
 	sp.Peer = outboundPeer
 	sp.connReq = msg.connReq
-	sp.isWhitelisted = isWhitelisted(msg.conn.RemoteAddr())
-	sp.AssociateConnection(msg.conn)
+
+	s.peerConnected(sp, msg.conn)
+
+	s.addrManager.Attempt(sp.NA())
+}
+
+func (s *Server) peerConnected(sp *Peer, conn net.Conn) {
+	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
+	spawn(func() {
+		err := sp.AssociateConnection(conn)
+		if err != nil {
+			peerLog.Debugf("Error connecting to peer: %+v", err)
+			return
+		}
+
+		s.SyncManager.NewPeer(sp.Peer)
+
+		s.AddPeer(sp)
+		s.peerDoneHandler(sp)
 	})
-	s.addrManager.Attempt(sp.NA())
 }
 
 // outboundPeerConnected is invoked by the connection manager when a new
diff --git a/util/testtools/testtools.go b/util/testtools/testtools.go
index 43c77f242..c75e8d155 100644
--- a/util/testtools/testtools.go
+++ b/util/testtools/testtools.go
@@ -1,6 +1,8 @@
 package testtools
 
 import (
+	"time"
+
 	"github.com/kaspanet/kaspad/dagconfig"
 	"github.com/pkg/errors"
 
@@ -94,3 +96,21 @@ func RegisterSubnetworkForTest(dag *blockdag.BlockDAG, params *dagconfig.Params,
 	}
 	return subnetworkID, nil
 }
+
+// WaitTillAllCompleteOrTimeout waits until all the provided channels have been written to,
+// or until a timeout period has passed.
+// Returns true iff all channels returned in the allotted time.
+func WaitTillAllCompleteOrTimeout(timeoutDuration time.Duration, chans ...chan struct{}) (ok bool) {
+	timeout := time.After(timeoutDuration)
+
+	for _, c := range chans {
+		select {
+		case <-c:
+			continue
+		case <-timeout:
+			return false
+		}
+	}
+
+	return true
+}

From b884ba128ef2df8bc619b50eebe57bd89a8e7471 Mon Sep 17 00:00:00 2001
From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Date: Wed, 20 May 2020 10:47:01 +0300
Subject: [PATCH 33/77] [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)

* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.
* [NOD-1008] Implement clearOldEntries.
* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.
* [NOD-1008] Fix a typo.
* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.
* [NOD-1008] Begin implementing TestClearOldEntries.
* [NOD-1008] Finish implementing TestClearOldEntries.
* [NOD-1008] Fix a comment.
* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.
* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
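The retention rule this patch introduces can be read independently of the diff below: diffData stays in memory only for blocks whose blueScore is at least virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded. A minimal standalone sketch of that predicate follows; shouldStayLoaded and the example values are illustrative names for this note, not part of the patch itself — only the constant's name, its value of 100, and the comparison come from the commit.

package main

import "fmt"

// Mirrors the variable the patch adds to blockdag/utxodiffstore.go;
// the value 100 matches the commit message above.
const maxBlueScoreDifferenceToKeepLoaded uint64 = 100

// shouldStayLoaded reports whether a block's diffData should remain in
// memory. The guard is needed because blue scores are unsigned: early in
// the DAG's life the virtual's blue score may be smaller than the allowed
// difference, and the subtraction would otherwise wrap around.
func shouldStayLoaded(blueScore, virtualBlueScore uint64) bool {
	var minBlueScore uint64
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScore = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	return blueScore >= minBlueScore
}

func main() {
	fmt.Println(shouldStayLoaded(450, 500)) // true: within the last 100 blue scores
	fmt.Println(shouldStayLoaded(399, 500)) // false: clearOldEntries would evict this block's diffData
}

clearOldEntries in the diff below applies the same comparison to every entry in the loaded map and deletes whatever fails it.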
--- blockdag/dag.go | 9 ++-- blockdag/dag_test.go | 10 +++- blockdag/utxodiffstore.go | 86 ++++++++++++++++++++++------------ blockdag/utxodiffstore_test.go | 74 ++++++++++++++++++++++++++++- 4 files changed, 140 insertions(+), 39 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index 3bf409611..8c9cf6732 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -804,6 +804,7 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT dag.index.clearDirtyEntries() dag.utxoDiffStore.clearDirtyEntries() + dag.utxoDiffStore.clearOldEntries() dag.reachabilityStore.clearDirtyEntries() dag.multisetStore.clearNewEntries() @@ -910,9 +911,9 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { for parent := range dag.lastFinalityPoint.parents { queue = append(queue, parent) } - var blockHashesToDelete []*daghash.Hash + var nodesToDelete []*blockNode if deleteDiffData { - blockHashesToDelete = make([]*daghash.Hash, 0, dag.dagParams.FinalityInterval) + nodesToDelete = make([]*blockNode, 0, dag.dagParams.FinalityInterval) } for len(queue) > 0 { var current *blockNode @@ -920,7 +921,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { if !current.isFinalized { current.isFinalized = true if deleteDiffData { - blockHashesToDelete = append(blockHashesToDelete, current.hash) + nodesToDelete = append(nodesToDelete, current) } for parent := range current.parents { queue = append(queue, parent) @@ -928,7 +929,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { } } if deleteDiffData { - err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), blockHashesToDelete) + err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), nodesToDelete) if err != nil { panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err)) } diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index a47abf757..a0933b560 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/kaspanet/kaspad/dbaccess" "github.com/pkg/errors" + "math" "os" "path/filepath" "testing" @@ -953,6 +954,11 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) { // Manually set the last finality point dag.lastFinalityPoint = nodes[finalityInterval-1] + // Don't unload diffData + currentDifference := maxBlueScoreDifferenceToKeepLoaded + maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64 + defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }() + dag.finalizeNodesBelowFinalityPoint(deleteDiffData) flushUTXODiffStore() @@ -960,7 +966,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) { if !node.isFinalized { t.Errorf("Node with blue score %d expected to be finalized", node.blueScore) } - if _, ok := dag.utxoDiffStore.loaded[*node.hash]; deleteDiffData && ok { + if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok { t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData) } else if !deleteDiffData && !ok { t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData) @@ -988,7 +994,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) { if node.isFinalized { t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore) } - if _, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok { + if _, ok := 
dag.utxoDiffStore.loaded[node]; !ok { t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore) } if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil { diff --git a/blockdag/utxodiffstore.go b/blockdag/utxodiffstore.go index 70fdaa88d..1fa88168f 100644 --- a/blockdag/utxodiffstore.go +++ b/blockdag/utxodiffstore.go @@ -14,16 +14,16 @@ type blockUTXODiffData struct { type utxoDiffStore struct { dag *BlockDAG - dirty map[daghash.Hash]struct{} - loaded map[daghash.Hash]*blockUTXODiffData + dirty map[*blockNode]struct{} + loaded map[*blockNode]*blockUTXODiffData mtx *locks.PriorityMutex } func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore { return &utxoDiffStore{ dag: dag, - dirty: make(map[daghash.Hash]struct{}), - loaded: make(map[daghash.Hash]*blockUTXODiffData), + dirty: make(map[*blockNode]struct{}), + loaded: make(map[*blockNode]*blockUTXODiffData), mtx: locks.NewPriorityMutex(), } } @@ -32,15 +32,15 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er diffStore.mtx.HighPriorityWriteLock() defer diffStore.mtx.HighPriorityWriteUnlock() // load the diff data from DB to diffStore.loaded - _, err := diffStore.diffDataByHash(node.hash) + _, err := diffStore.diffDataByBlockNode(node) if dbaccess.IsNotFoundError(err) { - diffStore.loaded[*node.hash] = &blockUTXODiffData{} + diffStore.loaded[node] = &blockUTXODiffData{} } else if err != nil { return err } - diffStore.loaded[*node.hash].diff = diff - diffStore.setBlockAsDirty(node.hash) + diffStore.loaded[node].diff = diff + diffStore.setBlockAsDirty(node) return nil } @@ -48,19 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl diffStore.mtx.HighPriorityWriteLock() defer diffStore.mtx.HighPriorityWriteUnlock() // load the diff data from DB to diffStore.loaded - _, err := diffStore.diffDataByHash(node.hash) + _, err := diffStore.diffDataByBlockNode(node) if err != nil { return err } - diffStore.loaded[*node.hash].diffChild = diffChild - diffStore.setBlockAsDirty(node.hash) + diffStore.loaded[node].diffChild = diffChild + diffStore.setBlockAsDirty(node) return nil } -func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, blockHashes []*daghash.Hash) error { - for _, hash := range blockHashes { - err := diffStore.removeBlockDiffData(dbContext, hash) +func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error { + for _, node := range nodes { + err := diffStore.removeBlockDiffData(dbContext, node) if err != nil { return err } @@ -68,37 +68,37 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, return nil } -func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, blockHash *daghash.Hash) error { +func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error { diffStore.mtx.LowPriorityWriteLock() defer diffStore.mtx.LowPriorityWriteUnlock() - delete(diffStore.loaded, *blockHash) - err := dbaccess.RemoveDiffData(dbContext, blockHash) + delete(diffStore.loaded, node) + err := dbaccess.RemoveDiffData(dbContext, node.hash) if err != nil { return err } return nil } -func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) { - diffStore.dirty[*blockHash] = struct{}{} +func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) { + diffStore.dirty[node] = struct{}{} } -func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) 
(*blockUTXODiffData, error) { - if diffData, ok := diffStore.loaded[*hash]; ok { +func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) { + if diffData, ok := diffStore.loaded[node]; ok { return diffData, nil } - diffData, err := diffStore.diffDataFromDB(hash) + diffData, err := diffStore.diffDataFromDB(node.hash) if err != nil { return nil, err } - diffStore.loaded[*hash] = diffData + diffStore.loaded[node] = diffData return diffData, nil } func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) { diffStore.mtx.HighPriorityReadLock() defer diffStore.mtx.HighPriorityReadUnlock() - diffData, err := diffStore.diffDataByHash(node.hash) + diffData, err := diffStore.diffDataByBlockNode(node) if err != nil { return nil, err } @@ -108,7 +108,7 @@ func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) { func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) { diffStore.mtx.HighPriorityReadLock() defer diffStore.mtx.HighPriorityReadUnlock() - diffData, err := diffStore.diffDataByHash(node.hash) + diffData, err := diffStore.diffDataByBlockNode(node) if err != nil { return nil, err } @@ -135,11 +135,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error { // Allocate a buffer here to avoid needless allocations/grows // while writing each entry. buffer := &bytes.Buffer{} - for hash := range diffStore.dirty { - hash := hash // Copy hash to a new variable to avoid passing the same pointer + for node := range diffStore.dirty { buffer.Reset() - diffData := diffStore.loaded[hash] - err := storeDiffData(dbContext, buffer, &hash, diffData) + diffData := diffStore.loaded[node] + err := storeDiffData(dbContext, buffer, node.hash, diffData) if err != nil { return err } @@ -148,7 +147,32 @@ func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error { } func (diffStore *utxoDiffStore) clearDirtyEntries() { - diffStore.dirty = make(map[daghash.Hash]struct{}) + diffStore.dirty = make(map[*blockNode]struct{}) +} + +// maxBlueScoreDifferenceToKeepLoaded is the maximum difference +// between the virtual's blueScore and a blockNode's blueScore +// under which to keep diff data loaded in memory. +var maxBlueScoreDifferenceToKeepLoaded uint64 = 100 + +// clearOldEntries removes entries whose blue score is lower than +// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. +func (diffStore *utxoDiffStore) clearOldEntries() { + virtualBlueScore := diffStore.dag.VirtualBlueScore() + minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded + if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore { + minBlueScore = 0 + } + + toRemove := make(map[*blockNode]struct{}) + for node := range diffStore.loaded { + if node.blueScore < minBlueScore { + toRemove[node] = struct{}{} + } + } + for node := range toRemove { + delete(diffStore.loaded, node) + } } // storeDiffData stores the UTXO diff data to the database. @@ -156,7 +180,7 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() { func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error { // To avoid a ton of allocs, use the io.Writer // instead of allocating one. We expect the buffer to - // already be initalized and, in most cases, to already + // already be initialized and, in most cases, to already // be large enough to accommodate the serialized data // without growing. 
err := serializeBlockUTXODiffData(w, diffData) diff --git a/blockdag/utxodiffstore_test.go b/blockdag/utxodiffstore_test.go index 1d8abc316..d7c6d89d7 100644 --- a/blockdag/utxodiffstore_test.go +++ b/blockdag/utxodiffstore_test.go @@ -78,7 +78,7 @@ func TestUTXODiffStore(t *testing.T) { if err != nil { t.Fatalf("Failed to commit database transaction: %s", err) } - delete(dag.utxoDiffStore.loaded, *node.hash) + delete(dag.utxoDiffStore.loaded, node) if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil { t.Fatalf("diffByNode: unexpected error: %s", err) @@ -87,9 +87,79 @@ func TestUTXODiffStore(t *testing.T) { } // Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded - if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok { + if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok { t.Errorf("the diff data wasn't added to loaded map after requesting it") } else if !reflect.DeepEqual(loadedDiffData.diff, diff) { t.Errorf("Expected diff and loadedDiff to be equal") } } + +func TestClearOldEntries(t *testing.T) { + // Create a new database and DAG instance to run tests against. + dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{ + DAGParams: &dagconfig.SimnetParams, + }) + if err != nil { + t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err) + } + defer teardownFunc() + + // Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run + currentDifference := maxBlueScoreDifferenceToKeepLoaded + maxBlueScoreDifferenceToKeepLoaded = 10 + defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }() + + // Add 10 blocks + blockNodes := make([]*blockNode, 10) + for i := 0; i < 10; i++ { + processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil) + + node := dag.index.LookupNode(processedBlock.BlockHash()) + if node == nil { + t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash()) + } + blockNodes[i] = node + } + + // Make sure that all of them exist in the loaded set + for _, node := range blockNodes { + _, ok := dag.utxoDiffStore.loaded[node] + if !ok { + t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash) + } + } + + // Add 10 more blocks on top of the others + for i := 0; i < 10; i++ { + PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil) + } + + // Make sure that all the old nodes no longer exist in the loaded set + for _, node := range blockNodes { + _, ok := dag.utxoDiffStore.loaded[node] + if ok { + t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash) + } + } + + // Add a block on top of the genesis to force the retrieval of all diffData + processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) + node := dag.index.LookupNode(processedBlock.BlockHash()) + if node == nil { + t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash()) + } + + // Make sure that the child-of-genesis node isn't in the loaded set + _, ok := dag.utxoDiffStore.loaded[node] + if ok { + t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash) + } + + // Make sure that all the old nodes still do not exist in the loaded set + for _, node := range blockNodes { + _, ok := dag.utxoDiffStore.loaded[node] + if ok { + t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash) + } + } +} From 0ca127853d3469f77300fe340b1e4dbc2c24cbc2 Mon Sep 17 00:00:00 2001 
From: Svarog Date: Wed, 20 May 2020 12:43:52 +0300 Subject: [PATCH 34/77] [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727) * [NOD-975] Don't include block transactions inside its UTXO commitment (#711) * [NOD-975] Don't include block transactions inside its UTXO commitment. * Revert "[NOD-975] Don't include block transactions inside its UTXO commitment." This reverts commit b1a2ae66 * [NOD-975] Implement a (currently failing) TestUTXOCommitment. * [NOD-975] Remove the block's own transactions from calcMultiset. * [NOD-975] Simplify calcMultiset. * [NOD-975] Add a comment on top of selectedParentMultiset. * [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset. * [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset. * [NOD-975] Extract selectedParentPastUTXO to a separate function. * [NOD-975] Remove selectedParentUTXO from pastUTXO's return values. * [NOD-975] Add txs to TestUTXOCommitment. * [NOD-975] Remove debug code. * [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original. * [NOD-975] Add a test: TestPastUTXOMultiSet. * [NOD-975] Improve TestPastUTXOMultiSet. * [NOD-976] Implement tests for UTXO commitments (#715) * [NOD-975] Don't include block transactions inside its UTXO commitment. * Revert "[NOD-975] Don't include block transactions inside its UTXO commitment." This reverts commit b1a2ae66 * [NOD-975] Implement a (currently failing) TestUTXOCommitment. * [NOD-975] Remove the block's own transactions from calcMultiset. * [NOD-975] Simplify calcMultiset. * [NOD-975] Add a comment on top of selectedParentMultiset. * [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset. * [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset. * [NOD-975] Extract selectedParentPastUTXO to a separate function. * [NOD-975] Remove selectedParentUTXO from pastUTXO's return values. * [NOD-975] Add txs to TestUTXOCommitment. * [NOD-976] Generate new blockDB blocks for tests. * [NOD-976] Fix TestBlueBlockWindow. * [NOD-976] Fix TestIsKnownBlock. * [NOD-976] Fix TestGHOSTDAG. * [NOD-976] Fix TestUTXOCommitment. * [NOD-976] Remove kaka. * [NOD-990] Save utxo diffs of past UTXO (#724) * [NOD-990] Save UTXO diffs of past UTXO * [NOD-990] Check for block double spends with its past instead of building its UTXO * [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment * [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx * [NOD-990] Rename i->j to avoid confusion * [NOD-990] Break long lines * [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock * [NOD-990] Make ErrDoubleSpendInSameBlock more detailed * [NOD-990] Add testProcessBlockRuleError * [NOD-990] Fix comment * [NOD-990] Add test for duplicate transactions on the same block * [NOD-990] Use pkg/errors on panic * [NOD-990] Make cloneWithoutBase method * [NOD-990] Break long lines * [NOD-990] Fix comment * [NOD-990] Fix wrong variable names * [NOD-990] Fix comment * [NOD-974] Generate new test blocks. * [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG. * [NOD-974] Fix TestUTXOCommitment. 
* [NOD-974] Fix comments Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Co-authored-by: stasatdaglabs Co-authored-by: Ori Newman --- blockdag/dag.go | 224 ++++++++++------------------ blockdag/dag_test.go | 243 +++++++++++++++++++++++++------ blockdag/error.go | 6 + blockdag/ghostdag_test.go | 4 +- blockdag/mining.go | 10 +- blockdag/testdata/blk_0_to_4.dat | Bin 2055 -> 2055 bytes blockdag/testdata/blk_3A.dat | Bin 467 -> 467 bytes blockdag/testdata/blk_3B.dat | Bin 354 -> 354 bytes blockdag/testdata/blk_3C.dat | Bin 382 -> 382 bytes blockdag/testdata/blk_3D.dat | Bin 508 -> 508 bytes blockdag/utxoset.go | 93 +----------- blockdag/utxoset_test.go | 75 ---------- blockdag/validate.go | 21 ++- mining/test_utils.go | 2 +- 14 files changed, 309 insertions(+), 369 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index 8c9cf6732..35c9d5a95 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -574,7 +574,8 @@ func (dag *BlockDAG) connectBlock(node *blockNode, return nil, err } - newBlockUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd) + newBlockPastUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := + node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd) if err != nil { var ruleErr RuleError if ok := errors.As(err, &ruleErr); ok { @@ -589,7 +590,8 @@ func (dag *BlockDAG) connectBlock(node *blockNode, } // Apply all changes to the DAG. - virtualUTXODiff, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone) + virtualUTXODiff, chainUpdates, err := + dag.applyDAGChanges(node, newBlockPastUTXO, newBlockMultiSet, selectedParentAnticone) if err != nil { // Since all validation logic has already ran, if applyDAGChanges errors out, // this means we have a problem in the internal structure of the DAG - a problem which is @@ -606,29 +608,43 @@ func (dag *BlockDAG) connectBlock(node *blockNode, return chainUpdates, nil } -// calcMultiset returns the multiset of the UTXO of the given block with the given transactions. -func (node *blockNode) calcMultiset(dag *BlockDAG, transactions []*util.Tx, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO, pastUTXO UTXOSet) (*secp256k1.MultiSet, error) { - ms, err := node.pastUTXOMultiSet(dag, acceptanceData, selectedParentUTXO) +// calcMultiset returns the multiset of the past UTXO of the given block. 
+func (node *blockNode) calcMultiset(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, + selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) { + + return node.pastUTXOMultiSet(dag, acceptanceData, selectedParentPastUTXO) +} + +func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, + selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) { + + ms, err := node.selectedParentMultiset(dag) if err != nil { return nil, err } - for _, tx := range transactions { - ms, err = addTxToMultiset(ms, tx.MsgTx(), pastUTXO, UnacceptedBlueScore) - if err != nil { - return nil, err + for _, blockAcceptanceData := range acceptanceData { + for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData { + if !txAcceptanceData.IsAccepted { + continue + } + + tx := txAcceptanceData.Tx.MsgTx() + + var err error + ms, err = addTxToMultiset(ms, tx, selectedParentPastUTXO, node.blueScore) + if err != nil { + return nil, err + } } } - return ms, nil } -// acceptedSelectedParentMultiset takes the multiset of the selected -// parent, replaces all the selected parent outputs' blue score with -// the block blue score and returns the result. -func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG, - acceptanceData MultiBlockTxsAcceptanceData) (*secp256k1.MultiSet, error) { - +// selectedParentMultiset returns the multiset of the node's selected +// parent. If the node is the genesis blockNode then it does not have +// a selected parent, in which case return a new, empty multiset. +func (node *blockNode) selectedParentMultiset(dag *BlockDAG) (*secp256k1.MultiSet, error) { if node.isGenesis() { return secp256k1.NewMultiset(), nil } @@ -638,61 +654,6 @@ func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG, return nil, err } - selectedParentAcceptanceData, exists := acceptanceData.FindAcceptanceData(node.selectedParent.hash) - if !exists { - return nil, errors.Errorf("couldn't find selected parent acceptance data for block %s", node) - } - for _, txAcceptanceData := range selectedParentAcceptanceData.TxAcceptanceData { - tx := txAcceptanceData.Tx - msgTx := tx.MsgTx() - isCoinbase := tx.IsCoinBase() - for i, txOut := range msgTx.TxOut { - outpoint := *wire.NewOutpoint(tx.ID(), uint32(i)) - - unacceptedEntry := NewUTXOEntry(txOut, isCoinbase, UnacceptedBlueScore) - acceptedEntry := NewUTXOEntry(txOut, isCoinbase, node.blueScore) - - var err error - ms, err = removeUTXOFromMultiset(ms, unacceptedEntry, &outpoint) - if err != nil { - return nil, err - } - - ms, err = addUTXOToMultiset(ms, acceptedEntry, &outpoint) - if err != nil { - return nil, err - } - } - } - - return ms, nil -} - -func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO UTXOSet) (*secp256k1.MultiSet, error) { - ms, err := node.acceptedSelectedParentMultiset(dag, acceptanceData) - if err != nil { - return nil, err - } - - for _, blockAcceptanceData := range acceptanceData { - if blockAcceptanceData.BlockHash.IsEqual(node.selectedParent.hash) { - continue - } - - for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData { - if !txAcceptanceData.IsAccepted { - continue - } - - tx := txAcceptanceData.Tx.MsgTx() - - var err error - ms, err = addTxToMultiset(ms, tx, selectedParentUTXO, node.blueScore) - if err != nil { - return nil, err - } - } - } return ms, nil } @@ -1011,7 +972,8 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock // It returns the diff in 
the virtual block's UTXO set. // // This function MUST be called with the DAG state lock held (for writes). -func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) ( +func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockPastUTXO UTXOSet, + newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) ( virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) { // Add the block to the reachability structures @@ -1022,7 +984,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newB dag.multisetStore.setMultiset(node, newBlockMultiset) - if err = node.updateParents(dag, newBlockUTXO); err != nil { + if err = node.updateParents(dag, newBlockPastUTXO); err != nil { return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node) } @@ -1063,26 +1025,23 @@ func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error { return newVirtualUTXODiffSet.meldToBase() } -// applyAndVerifyBlockTransactionsToPastUTXO applies a block's transactions to its -// given past UTXO, and verifies that there are no double spends with its past. -func applyAndVerifyBlockTransactionsToPastUTXO(pastUTXO UTXOSet, blockTransactions []*util.Tx) (UTXOSet, error) { - diff := NewUTXODiff() - +// checkDoubleSpendsWithBlockPast checks that each block transaction +// has a corresponding UTXO in the block pastUTXO. +func checkDoubleSpendsWithBlockPast(pastUTXO UTXOSet, blockTransactions []*util.Tx) error { for _, tx := range blockTransactions { - txDiff, err := pastUTXO.diffFromTx(tx.MsgTx(), UnacceptedBlueScore) - if errors.Is(err, errUTXOMissingTxOut) { - return nil, ruleError(ErrMissingTxOut, err.Error()) + if tx.IsCoinBase() { + continue } - if err != nil { - return nil, err - } - diff, err = diff.WithDiff(txDiff) - if err != nil { - return nil, err + + for _, txIn := range tx.MsgTx().TxIn { + if _, ok := pastUTXO.Get(txIn.PreviousOutpoint); !ok { + return ruleError(ErrMissingTxOut, fmt.Sprintf("missing transaction "+ + "output %s in the utxo set", txIn.PreviousOutpoint)) + } } } - return pastUTXO.WithDiff(diff) + return nil } // verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO @@ -1091,7 +1050,7 @@ func applyAndVerifyBlockTransactionsToPastUTXO(pastUTXO UTXOSet, blockTransactio func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) ( newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) { - pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(node) + pastUTXO, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(node) if err != nil { return nil, nil, nil, nil, err } @@ -1106,12 +1065,7 @@ func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx return nil, nil, nil, nil, err } - utxo, err := applyAndVerifyBlockTransactionsToPastUTXO(pastUTXO, transactions) - if err != nil { - return nil, nil, nil, nil, err - } - - multiset, err = node.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO) + multiset, err = node.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO) if err != nil { return nil, nil, nil, nil, err } @@ -1124,7 +1078,7 @@ func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx return nil, nil, nil, nil, ruleError(ErrBadUTXOCommitment, str) } - return utxo, 
txsAcceptanceData, feeData, multiset, nil + return pastUTXO, txsAcceptanceData, feeData, multiset, nil } // TxAcceptanceData stores a transaction together with an indication @@ -1180,34 +1134,33 @@ func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) { return blueBlocks, nil } -// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's UTXO set +// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's past UTXO set // Purposefully ignoring failures - these are just unaccepted transactions // Writing down which transactions were accepted or not in txsAcceptanceData -func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selectedParentAcceptanceData []TxAcceptanceData, blueBlocks []*util.Block) ( +func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlocks []*util.Block) ( pastUTXO UTXOSet, multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) { - pastUTXO = acceptedSelectedParentUTXO - multiBlockTxsAcceptanceData = MultiBlockTxsAcceptanceData{BlockTxsAcceptanceData{ - BlockHash: *node.selectedParent.hash, - TxAcceptanceData: selectedParentAcceptanceData, - }} + pastUTXO = selectedParentPastUTXO.(*DiffUTXOSet).cloneWithoutBase() + multiBlockTxsAcceptanceData = make(MultiBlockTxsAcceptanceData, len(blueBlocks)) // Add blueBlocks to multiBlockTxsAcceptanceData in topological order. This // is so that anyone who iterates over it would process blocks (and transactions) // in their order of appearance in the DAG. - // We skip the selected parent, because we calculated its UTXO in acceptSelectedParentTransactions. - for i := 1; i < len(blueBlocks); i++ { + for i := 0; i < len(blueBlocks); i++ { blueBlock := blueBlocks[i] transactions := blueBlock.Transactions() blockTxsAcceptanceData := BlockTxsAcceptanceData{ BlockHash: *blueBlock.Hash(), TxAcceptanceData: make([]TxAcceptanceData, len(transactions)), } - for i, tx := range blueBlock.Transactions() { + isSelectedParent := i == 0 + + for j, tx := range blueBlock.Transactions() { var isAccepted bool + // Coinbase transaction outputs are added to the UTXO // only if they are in the selected parent chain. 
- if tx.IsCoinBase() { + if !isSelectedParent && tx.IsCoinBase() { isAccepted = false } else { isAccepted, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore) @@ -1215,9 +1168,9 @@ func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selec return nil, nil, err } } - blockTxsAcceptanceData.TxAcceptanceData[i] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted} + blockTxsAcceptanceData.TxAcceptanceData[j] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted} } - multiBlockTxsAcceptanceData = append(multiBlockTxsAcceptanceData, blockTxsAcceptanceData) + multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData } return pastUTXO, multiBlockTxsAcceptanceData, nil @@ -1248,7 +1201,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e return err } if diffChild == nil { - parentUTXO, err := dag.restoreUTXO(parent) + parentPastUTXO, err := dag.restorePastUTXO(parent) if err != nil { return err } @@ -1256,7 +1209,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e if err != nil { return err } - diff, err := newBlockUTXO.diffFrom(parentUTXO) + diff, err := newBlockUTXO.diffFrom(parentPastUTXO) if err != nil { return err } @@ -1274,12 +1227,13 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e // To save traversals over the blue blocks, it also returns the transaction acceptance data for // all blue blocks func (dag *BlockDAG) pastUTXO(node *blockNode) ( - pastUTXO, selectedParentUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) { + pastUTXO, selectedParentPastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) { if node.isGenesis() { - return genesisPastUTXO(dag.virtual), NewFullUTXOSet(), MultiBlockTxsAcceptanceData{}, nil + return genesisPastUTXO(dag.virtual), nil, MultiBlockTxsAcceptanceData{}, nil } - selectedParentUTXO, err = dag.restoreUTXO(node.selectedParent) + + selectedParentPastUTXO, err = dag.restorePastUTXO(node.selectedParent) if err != nil { return nil, nil, nil, err } @@ -1289,46 +1243,16 @@ func (dag *BlockDAG) pastUTXO(node *blockNode) ( return nil, nil, nil, err } - selectedParent := blueBlocks[0] - acceptedSelectedParentUTXO, selectedParentAcceptanceData, err := node.acceptSelectedParentTransactions(selectedParent, selectedParentUTXO) + pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(selectedParentPastUTXO, blueBlocks) if err != nil { return nil, nil, nil, err } - pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks) - if err != nil { - return nil, nil, nil, err - } - - return pastUTXO, selectedParentUTXO, bluesTxsAcceptanceData, nil + return pastUTXO, selectedParentPastUTXO, bluesTxsAcceptanceData, nil } -func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Block, selectedParentUTXO UTXOSet) (acceptedSelectedParentUTXO UTXOSet, txAcceptanceData []TxAcceptanceData, err error) { - diff := NewUTXODiff() - txAcceptanceData = make([]TxAcceptanceData, len(selectedParent.Transactions())) - for i, tx := range selectedParent.Transactions() { - txAcceptanceData[i] = TxAcceptanceData{ - Tx: tx, - IsAccepted: true, - } - acceptanceDiff, err := selectedParentUTXO.diffFromAcceptedTx(tx.MsgTx(), node.blueScore) - if err != nil { - return nil, nil, err - } - diff, err = diff.WithDiff(acceptanceDiff) - if err != nil { - return nil, nil, err - } - } - acceptedSelectedParentUTXO, err = selectedParentUTXO.WithDiff(diff) - if 
err != nil { - return nil, nil, err - } - return acceptedSelectedParentUTXO, txAcceptanceData, nil -} - -// restoreUTXO restores the UTXO of a given block from its diff -func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) { +// restorePastUTXO restores the UTXO of a given block from its diff +func (dag *BlockDAG) restorePastUTXO(node *blockNode) (UTXOSet, error) { stack := []*blockNode{} // Iterate over the chain of diff-childs from node till virtual and add them @@ -1369,11 +1293,11 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) { // updateTipsUTXO builds and applies new diff UTXOs for all the DAG's tips func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error { for tip := range dag.virtual.parents { - tipUTXO, err := dag.restoreUTXO(tip) + tipPastUTXO, err := dag.restorePastUTXO(tip) if err != nil { return err } - diff, err := virtualUTXO.diffFrom(tipUTXO) + diff, err := virtualUTXO.diffFrom(tipPastUTXO) if err != nil { return err } diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index a0933b560..a1bbee2b0 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -6,11 +6,13 @@ package blockdag import ( "fmt" + "github.com/kaspanet/go-secp256k1" "github.com/kaspanet/kaspad/dbaccess" "github.com/pkg/errors" "math" "os" "path/filepath" + "reflect" "testing" "time" @@ -205,7 +207,7 @@ func TestIsKnownBlock(t *testing.T) { {hash: dagconfig.SimnetParams.GenesisHash.String(), want: true}, // Block 3b should be present (as a second child of Block 2). - {hash: "7f2bea5aa4122aed2a542447133e73da6b6f6190ec34c061be70d4576cdd7498", want: true}, + {hash: "48a752afbe36ad66357f751f8dee4f75665d24e18f644d83a3409b398405b46b", want: true}, // Block 100000 should be present (as an orphan). {hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true}, @@ -1124,6 +1126,23 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) { } } +func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *wire.MsgBlock, expectedRuleErr error) { + isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck) + + err = checkRuleError(err, expectedRuleErr) + if err != nil { + t.Errorf("checkRuleError: %s", err) + } + + if isDelayed { + t.Fatalf("ProcessBlock: block " + + "is too far in the future") + } + if isOrphan { + t.Fatalf("ProcessBlock: block got unexpectedly orphaned") + } +} + func TestDoubleSpends(t *testing.T) { params := dagconfig.SimnetParams params.BlockCoinbaseMaturity = 0 @@ -1176,26 +1195,7 @@ func TestDoubleSpends(t *testing.T) { } anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root() - isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(anotherBlockWithTx1), BFNoPoWCheck) - if err == nil { - t.Errorf("ProcessBlock expected an error") - } else { - var ruleErr RuleError - if ok := errors.As(err, &ruleErr); ok { - if ruleErr.ErrorCode != ErrOverwriteTx { - t.Errorf("ProcessBlock expected an %v error code but got %v", ErrOverwriteTx, ruleErr.ErrorCode) - } - } else { - t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err) - } - } - if isDelayed { - t.Fatalf("ProcessBlock: anotherBlockWithTx1 " + - "is too far in the future") - } - if isOrphan { - t.Fatalf("ProcessBlock: anotherBlockWithTx1 got unexpectedly orphaned") - } + testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, "")) // Check that a block will be rejected if it has a transaction that double spends // a transaction from its past. 
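
The testProcessBlockRuleError helper added above delegates the actual comparison to checkRuleError, whose body is not part of this patch. The following self-contained sketch shows how such a comparison is typically written on top of github.com/pkg/errors, the error package used throughout this series. RuleError, ErrorCode, and checkRuleErrorSketch are simplified stand-ins invented for this note, not the repository's real declarations:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrorCode identifies a kind of consensus-rule violation.
type ErrorCode int

const (
	ErrOverwriteTx ErrorCode = iota
	ErrMissingTxOut
)

// RuleError is a reduced stand-in for the blockdag rule-error type.
type RuleError struct {
	ErrorCode   ErrorCode
	Description string
}

func (e RuleError) Error() string { return e.Description }

// checkRuleErrorSketch returns nil when gotErr matches wantErr by error
// code, and a descriptive error otherwise. A nil wantErr demands a nil
// gotErr, which lets the same helper assert the "no error" case.
func checkRuleErrorSketch(gotErr, wantErr error) error {
	if wantErr == nil {
		if gotErr != nil {
			return errors.Errorf("expected no error, got %v", gotErr)
		}
		return nil
	}
	var wantRuleErr RuleError
	if !errors.As(wantErr, &wantRuleErr) {
		return errors.Errorf("expected error is not a RuleError: %v", wantErr)
	}
	var gotRuleErr RuleError
	if !errors.As(gotErr, &gotRuleErr) {
		return errors.Errorf("got %v, expected a RuleError with code %d", gotErr, wantRuleErr.ErrorCode)
	}
	if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
		return errors.Errorf("wrong error code: got %d, expected %d", gotRuleErr.ErrorCode, wantRuleErr.ErrorCode)
	}
	return nil
}

func main() {
	got := RuleError{ErrorCode: ErrOverwriteTx, Description: "transaction overwrites an existing one"}
	fmt.Println(checkRuleErrorSketch(got, RuleError{ErrorCode: ErrOverwriteTx}))  // <nil>
	fmt.Println(checkRuleErrorSketch(got, RuleError{ErrorCode: ErrMissingTxOut})) // wrong error code
}

Passing a nil expected error, as the anticone case below does, simply asserts that ProcessBlock succeeded.
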
@@ -1212,26 +1212,7 @@ func TestDoubleSpends(t *testing.T) { } blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root() - isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(blockWithDoubleSpendForTx1), BFNoPoWCheck) - if err == nil { - t.Errorf("ProcessBlock expected an error") - } else { - var ruleErr RuleError - if ok := errors.As(err, &ruleErr); ok { - if ruleErr.ErrorCode != ErrMissingTxOut { - t.Errorf("ProcessBlock expected an %v error code but got %v", ErrMissingTxOut, ruleErr.ErrorCode) - } - } else { - t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err) - } - } - if isDelayed { - t.Fatalf("ProcessBlock: blockWithDoubleSpendForTx1 " + - "is too far in the future") - } - if isOrphan { - t.Fatalf("ProcessBlock: blockWithDoubleSpendForTx1 got unexpectedly orphaned") - } + testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, "")) blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{doubleSpendTx1}) if err != nil { @@ -1240,15 +1221,181 @@ func TestDoubleSpends(t *testing.T) { // Check that a block will not get rejected if it has a transaction that double spends // a transaction from its anticone. - isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(blockInAnticoneOfBlockWithTx1), BFNoPoWCheck) + testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil) + + // Check that a block will be rejected if it has two transactions that spend the same UTXO. + blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil) if err != nil { - t.Fatalf("ProcessBlock: %v", err) + t.Fatalf("PrepareBlockForTest: %v", err) } - if isDelayed { - t.Fatalf("ProcessBlock: blockInAnticoneOfBlockWithTx1 " + - "is too far in the future") + + // Manually add tx1 and doubleSpendTx1. + blockWithDoubleSpendWithItself.Transactions = append(blockWithDoubleSpendWithItself.Transactions, tx1, doubleSpendTx1) + blockWithDoubleSpendWithItselfUtilTxs := make([]*util.Tx, len(blockWithDoubleSpendWithItself.Transactions)) + for i, tx := range blockWithDoubleSpendWithItself.Transactions { + blockWithDoubleSpendWithItselfUtilTxs[i] = util.NewTx(tx) } - if isOrphan { - t.Fatalf("ProcessBlock: blockInAnticoneOfBlockWithTx1 got unexpectedly orphaned") + blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root() + + testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, "")) + + // Check that a block will be rejected if it has the same transaction twice. + blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil) + if err != nil { + t.Fatalf("PrepareBlockForTest: %v", err) + } + + // Manually add tx1 twice. 
+ blockWithDuplicateTransaction.Transactions = append(blockWithDuplicateTransaction.Transactions, tx1, tx1) + blockWithDuplicateTransactionUtilTxs := make([]*util.Tx, len(blockWithDuplicateTransaction.Transactions)) + for i, tx := range blockWithDuplicateTransaction.Transactions { + blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx) + } + blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root() + testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, "")) +} + +func TestUTXOCommitment(t *testing.T) { + // Create a new database and dag instance to run tests against. + params := dagconfig.DevnetParams + params.BlockCoinbaseMaturity = 0 + dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{ + DAGParams: ¶ms, + }) + if err != nil { + t.Fatalf("TestUTXOCommitment: Failed to setup dag instance: %v", err) + } + defer teardownFunc() + + resetExtraNonceForTest() + + createTx := func(txToSpend *wire.MsgTx) *wire.MsgTx { + scriptPubKey, err := txscript.PayToScriptHashScript(OpTrueScript) + if err != nil { + t.Fatalf("TestUTXOCommitment: failed to build script pub key: %s", err) + } + signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil) + if err != nil { + t.Fatalf("TestUTXOCommitment: failed to build signature script: %s", err) + } + txIn := &wire.TxIn{ + PreviousOutpoint: wire.Outpoint{TxID: *txToSpend.TxID(), Index: 0}, + SignatureScript: signatureScript, + Sequence: wire.MaxTxInSequenceNum, + } + txOut := &wire.TxOut{ + ScriptPubKey: scriptPubKey, + Value: uint64(1), + } + return wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}) + } + + // Build the following DAG: + // G <- A <- B <- D + // <- C <- + genesis := params.GenesisBlock + + // Block A: + blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil) + + // Block B: + blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil) + + // Block C: + txSpendBlockACoinbase := createTx(blockA.Transactions[0]) + blockCTxs := []*wire.MsgTx{txSpendBlockACoinbase} + blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, blockCTxs) + + // Block D: + txSpendTxInBlockC := createTx(txSpendBlockACoinbase) + blockDTxs := []*wire.MsgTx{txSpendTxInBlockC} + blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs) + + // Get the pastUTXO of blockD + blockNodeD := dag.index.LookupNode(blockD.BlockHash()) + if blockNodeD == nil { + t.Fatalf("TestUTXOCommitment: blockNode for block D not found") + } + blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD) + blockDPastDiffUTXOSet := blockDPastUTXO.(*DiffUTXOSet) + + // Build a Multiset for block D + multiset := secp256k1.NewMultiset() + for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection { + var err error + multiset, err = addUTXOToMultiset(multiset, entry, &outpoint) + if err != nil { + t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed") + } + } + for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toAdd { + var err error + multiset, err = addUTXOToMultiset(multiset, entry, &outpoint) + if err != nil { + t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed") + } + } + for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toRemove { + var err error + multiset, err = removeUTXOFromMultiset(multiset, entry, &outpoint) + if err 
!= nil { + t.Fatalf("TestUTXOCommitment: removeUTXOFromMultiset unexpectedly failed") + } + } + + // Turn the multiset into a UTXO commitment + utxoCommitment := daghash.Hash(*multiset.Finalize()) + + // Make sure that the two commitments are equal + if !utxoCommitment.IsEqual(blockNodeD.utxoCommitment) { + t.Fatalf("TestUTXOCommitment: calculated UTXO commitment and "+ + "actual UTXO commitment don't match. Want: %s, got: %s", + utxoCommitment, blockNodeD.utxoCommitment) + } +} + +func TestPastUTXOMultiSet(t *testing.T) { + // Create a new database and dag instance to run tests against. + params := dagconfig.SimnetParams + dag, teardownFunc, err := DAGSetup("TestPastUTXOMultiSet", true, Config{ + DAGParams: ¶ms, + }) + if err != nil { + t.Fatalf("TestPastUTXOMultiSet: Failed to setup dag instance: %v", err) + } + defer teardownFunc() + + // Build a short chain + genesis := params.GenesisBlock + blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil) + blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil) + blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil) + + // Take blockC's selectedParentMultiset + blockNodeC := dag.index.LookupNode(blockC.BlockHash()) + if blockNodeC == nil { + t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found") + } + blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag) + if err != nil { + t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err) + } + + // Copy the multiset + blockCSelectedParentMultisetCopy := *blockCSelectedParentMultiset + blockCSelectedParentMultiset = &blockCSelectedParentMultisetCopy + + // Add a block on top of blockC + PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil) + + // Get blockC's selectedParentMultiset again + blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag) + if err != nil { + t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err) + } + + // Make sure that blockC's selectedParentMultiset had not changed + if !reflect.DeepEqual(blockCSelectedParentMultiset, blockCSelectedParentMultiSetAfterAnotherBlock) { + t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed") } } diff --git a/blockdag/error.go b/blockdag/error.go index 977a4e77a..cc86f7bb6 100644 --- a/blockdag/error.go +++ b/blockdag/error.go @@ -103,6 +103,11 @@ const ( // either does not exist or has already been spent. ErrMissingTxOut + // ErrDoubleSpendInSameBlock indicates a transaction + // that spends an output that was already spent by another + // transaction in the same block. + ErrDoubleSpendInSameBlock + // ErrUnfinalizedTx indicates a transaction has not been finalized. // A valid block may only contain finalized transactions. 
ErrUnfinalizedTx @@ -227,6 +232,7 @@ var errorCodeStrings = map[ErrorCode]string{ ErrDuplicateTxInputs: "ErrDuplicateTxInputs", ErrBadTxInput: "ErrBadTxInput", ErrMissingTxOut: "ErrMissingTxOut", + ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock", ErrUnfinalizedTx: "ErrUnfinalizedTx", ErrDuplicateTx: "ErrDuplicateTx", ErrOverwriteTx: "ErrOverwriteTx", diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 1770975a7..985ffabd0 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) { }{ { k: 3, - expectedReds: []string{"F", "G", "H", "I", "O", "Q"}, + expectedReds: []string{"F", "G", "H", "I", "N", "O"}, dagData: []*testBlockData{ { parents: []string{"A"}, @@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) { id: "T", expectedScore: 13, expectedSelectedParent: "S", - expectedBlues: []string{"S", "P", "N"}, + expectedBlues: []string{"S", "P", "Q"}, }, }, }, diff --git a/blockdag/mining.go b/blockdag/mining.go index 9b7ef095d..83fe7c1b6 100644 --- a/blockdag/mining.go +++ b/blockdag/mining.go @@ -38,7 +38,7 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er msgBlock.AddTransaction(tx.MsgTx()) } - multiset, err := dag.NextBlockMultiset(transactions) + multiset, err := dag.NextBlockMultiset() if err != nil { return nil, err } @@ -57,16 +57,16 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er } // NextBlockMultiset returns the multiset of an assumed next block -// built on top of the current tips, with the given transactions. +// built on top of the current tips. // // This function MUST be called with the DAG state lock held (for reads). -func (dag *BlockDAG) NextBlockMultiset(transactions []*util.Tx) (*secp256k1.MultiSet, error) { - pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode) +func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) { + _, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode) if err != nil { return nil, err } - return dag.virtual.blockNode.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO) + return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO) } // CoinbasePayloadExtraData returns coinbase payload extra data parameter diff --git a/blockdag/testdata/blk_0_to_4.dat b/blockdag/testdata/blk_0_to_4.dat index c539d6e734f0999821da7044b3ced818dcdea4f7..d7d516eba16f02ca09dadb513db0fd51c4ec50db 100644 GIT binary patch delta 610 zcmZn{XcyQplX0S<0P~)a+BY)NGcGrYVvQkpvlXbkJszy z_HzAScEr!*gT(INOmEgV1nS-?I)BLKwh?FVzpP|d-z6JTtv0t7PqrwY*Y2JYRNZ~_ zo0bGY3X{N$I?`I@#=89C3>xbl0WW$do|mYQE~D+7AXZnt^hl?iq%g+q4)lM-nym> znLQ>d%pZRC8-Fz7=91YHt`nPmAguTQ>B+m$8v!wQijC#E2t|iu+YkH=% zYA5PkRW`lsJ1Os_`&9n+Z3*r53Pg410>Z?9&zcl@M z?Ys%q>n`v&e?0nDC}R2a0C@}JQ&tL-Wtb%u2ss1j!vzYab>>@yvX!?mCFz}-(EY_^ XiRfF2hSm3*iqt-+Ondb*1LQjZiPsiZ delta 659 zcmZn{XcyQplTo*3qxmd-YK3VRr^@U@`PpBM~}(JbzeJb$X*tWV*rBx z|LYZ4CLd$G>TuZP%F+FwmYtxtiW(FKmH6DPU8%aD>vWPDpCT)XAF zwc@XC_r)T)AHtQkDZjmOW&eiPJl|{Fbtd0tQiyt#a;r)1Hvg_y&!jKL9Z>S#$~rlz z$a04un~aQd+?UKLa!A&KtOo%mMj*jR$ok2@*@7l7XFgspbr`FHK+91+y>zrluBl zX|I7$Y~$izvhTwUKK%E&d|>;_JAC|$&nlJGO^MuJq01^gCrakmnbCz`!8DSYvT&&!t(C6LPjVm-EIhsF@@aCa1ND ztxvRR)@ql7a|)%I0-GPiPtRTSaxR7bqlW%SSa$oqTYc)oGUEyA6pSCSMbJ&(}KJIZ0=R L^6OPXAm0H1Rd_H4 diff --git a/blockdag/testdata/blk_3A.dat b/blockdag/testdata/blk_3A.dat index 
5b83736e1aac2756b13112dfd89b269ccc344158..0251c8fa23535176126a31a2aa35cdbed9f5e32d 100644 GIT binary patch delta 232 zcmcc2e3`jk_WK>%(~JxtAi$`j+spNT*%3dJ4-&h7Grd{g5U6{n==>p@+eVzd|FV)< zT|B*iyf9W|a(sBZuU6!7tGwTXio5q+Jx|B^WN6m>5xKQ_|C%(0;=k1i{Fhp|w&io5 zE$)hLuRHfP%(~JxtAiyYf(1rQC&%yceAJ^?KmD=!A{PBV_0(s1HzPn6to@LJ^ zxOc9^2jP3xRyL(G&eXew)U#g9p87pq^~vq8Z>CO|6LbI4UIU@n#>Ky6--jD~`0sQ1 z!1kAS`1lu}RVu5S61l%Z_hjJF#HsrPZ?2GBy84ID3)49}_}({5Xgz(~JNKlu<4VTB ziM>*)gdFi72pA^!F&a+%o2Bq1%vfZ4%C`JYl5Mb2N?dAHv?1-Pq2Z`Okncl2#2-Lk(bpDXdZ6nU!e_6?_ z73X3zo)k|oyu9(M&D7gfC)U*7n0+DPqc-Ou7H^fd*T%Ot?_ZO~Q2e(#f&Wqq*S37_ zv&CJ}?RDqghWzc=dGP<5z>H4My8dG4mzJ*Qc`QwZ7q8|Ew_BpuDk%Bm{#KhyX0k|evN&o-= delta 186 zcmaFF^oXfm_WK>%C`JYl5MY!#=)(Nn=ivPKkL&iAN^STl{&>L|fjs6p-(4m+&$8zd z)KEGy+tMM+eYnAg|2~%w zY=3!&kALx5rLwvyk^3ujTfIYn%=ztVH1}oV&vRFo@@u`~yWa0~>2AZ?%j*{HPrbQz OVy~0}AxBI+J{thN09&H~ diff --git a/blockdag/testdata/blk_3C.dat b/blockdag/testdata/blk_3C.dat index c9fc086c3e73c082d59c7738e95d8b791900992e..7c2387cd9335b52946211de6ca52901d3eb0c94e 100644 GIT binary patch delta 198 zcmV;%06G8u0{#Mj9rxTlb^!nY0000G0xT_!3je0WPBicsyZZv{uYgf4+;Y#tJliq} zjs9w51-NR13i22&*e$n&JO|k*y2?0Fk;_ARvw3-wl3(&}ob`Bm?mIk2CQy3kn>(S1es?z*dd_%abwzO!e_t AN&o-= delta 198 zcmV;%06G8u0{#Mj9rxTlb^!nY0000G0vf?Y1NTh9pI`B=zjYe0`4{7$%n)t^ocBbK zLz+Jd5yLdp#lP~W&l-VzPW?3GA^Uwya4|B_2-T62Meb-C8W2>SdFPVjm${jfwLtE% zSdO6pPf2!Yt^L#96qk#$W=h9!+Z;FEz*nL$@_aXl)lt?Cix<41bLC&y7e__3roNGp z9Fd>|k*y2?0g<{`Ammmv6qi~0;=xdoegkr?tJ6bn(J-z1SP*>&XH{EOxsx&hOddyF AjQ{`u diff --git a/blockdag/testdata/blk_3D.dat b/blockdag/testdata/blk_3D.dat index e857dd182bd6a2197d8b0df490629b262dddc527..f6bd64d5e2c51ea91f2b33215a42fe0c4a69c896 100644 GIT binary patch delta 249 zcmeyv{D(PL_WK>%FN_QzAi$`j+spNT*%3dJ4-&h7Grd{g5U6{n==>p@+eVzd|FV)< z6lZNV4C_~lQ0U%7UipUirL zh}AbPJvfw~yv#uIWN%kBJKNhoAk%AC0)VWcGyX j#AY7|>-~RvvJhin2q8rvM>8=7{)Yl$lrv10WOM}p9UF0| delta 249 zcmeyv{D(PL_WK>%FN_QzAiyYf(1rQC&%yceAJ^?KmD=!A{PBV_0(s1HzPn6to@LJ^ z7!zmZVEB*4({Q5B7L6Pawo@$L$8<`<#4gPU-ZJxZ#Cose1-E6)?;QwVWbmoRyyI%% zwQlh}3yYt|-w=0q-Lh=o#6p>g4J;FTQx%?s8H-Gh{PpNyz@$3nqP43pJLg?ASo=Fd jpq4#7G&*$WWFf}D5JHMTj%H#E{0{}hC})@~$><6IEJ<&( diff --git a/blockdag/utxoset.go b/blockdag/utxoset.go index eb5a134c2..bab9a400a 100644 --- a/blockdag/utxoset.go +++ b/blockdag/utxoset.go @@ -399,78 +399,11 @@ type UTXOSet interface { fmt.Stringer diffFrom(other UTXOSet) (*UTXODiff, error) WithDiff(utxoDiff *UTXODiff) (UTXOSet, error) - diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) - diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error) clone() UTXOSet Get(outpoint wire.Outpoint) (*UTXOEntry, bool) } -var errUTXOMissingTxOut = errors.New("missing transaction output in the utxo set") - -// diffFromTx is a common implementation for diffFromTx, that works -// for both diff-based and full UTXO sets -// Returns a diff that is equivalent to provided transaction, -// or an error if provided transaction is not valid in the context of this UTXOSet -func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - diff := NewUTXODiff() - isCoinbase := tx.IsCoinBase() - if !isCoinbase { - for _, txIn := range tx.TxIn { - if entry, ok := u.Get(txIn.PreviousOutpoint); ok { - err := diff.RemoveEntry(txIn.PreviousOutpoint, entry) - if err != nil { - return nil, err - } - } else { - return nil, errors.Wrapf(errUTXOMissingTxOut, "Transaction %s is invalid because it spends "+ - "outpoint %s that is not in utxo set", 
tx.TxID(), txIn.PreviousOutpoint) - } - } - } - for i, txOut := range tx.TxOut { - entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore) - outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i)) - err := diff.AddEntry(outpoint, entry) - if err != nil { - return nil, err - } - } - return diff, nil -} - -// diffFromAcceptedTx is a common implementation for diffFromAcceptedTx, that works -// for both diff-based and full UTXO sets. -// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore. -// Returns an error if the provided transaction's entry is not valid in the context -// of this UTXOSet. -func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - diff := NewUTXODiff() - isCoinbase := tx.IsCoinBase() - for i, txOut := range tx.TxOut { - // Fetch any unaccepted transaction - existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i)) - existingEntry, ok := u.Get(existingOutpoint) - if !ok { - return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint) - } - - // Remove unaccepted entries - err := diff.RemoveEntry(existingOutpoint, existingEntry) - if err != nil { - return nil, err - } - - // Add new entries with their accepting blue score - newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore) - err = diff.AddEntry(existingOutpoint, newEntry) - if err != nil { - return nil, err - } - } - return diff, nil -} - // FullUTXOSet represents a full list of transaction outputs and their values type FullUTXOSet struct { utxoCollection @@ -544,12 +477,6 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool return true, nil } -// diffFromTx returns a diff that is equivalent to provided transaction, -// or an error if provided transaction is not valid in the context of this UTXOSet -func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - return diffFromTx(fus, tx, acceptingBlueScore) -} - func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool { for _, txIn := range tx.TxIn { outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index) @@ -561,10 +488,6 @@ func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool { return true } -func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - return diffFromAcceptedTx(fus, tx, acceptingBlueScore) -} - // clone returns a clone of this utxoSet func (fus *FullUTXOSet) clone() UTXOSet { return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()} @@ -690,16 +613,6 @@ func (dus *DiffUTXOSet) meldToBase() error { return nil } -// diffFromTx returns a diff that is equivalent to provided transaction, -// or an error if provided transaction is not valid in the context of this UTXOSet -func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - return diffFromTx(dus, tx, acceptingBlueScore) -} - -func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) { - return diffFromAcceptedTx(dus, tx, acceptingBlueScore) -} - func (dus *DiffUTXOSet) String() string { return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove) } @@ -709,6 +622,12 @@ func (dus *DiffUTXOSet) clone() UTXOSet { return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone()) } +// cloneWithoutBase returns a *DiffUTXOSet with same +// base as this 
*DiffUTXOSet and a cloned diff. +func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet { + return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone()) +} + // Get returns the UTXOEntry associated with provided outpoint in this UTXOSet. // Returns false in second output if this UTXOEntry was not found func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) { diff --git a/blockdag/utxoset_test.go b/blockdag/utxoset_test.go index c2f2d4ad6..95c99daac 100644 --- a/blockdag/utxoset_test.go +++ b/blockdag/utxoset_test.go @@ -1110,81 +1110,6 @@ testLoop: } } -func TestDiffFromTx(t *testing.T) { - fus := &FullUTXOSet{ - utxoCollection: utxoCollection{}, - } - - txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000") - txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0} - txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10} - cbTx := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil) - if isAccepted, err := fus.AddTx(cbTx, 1); err != nil { - t.Fatalf("AddTx unexpectedly failed. Error: %s", err) - } else if !isAccepted { - t.Fatalf("AddTx unexpectedly didn't add tx %s", cbTx.TxID()) - } - acceptingBlueScore := uint64(2) - cbOutpoint := wire.Outpoint{TxID: *cbTx.TxID(), Index: 0} - txIns := []*wire.TxIn{{ - PreviousOutpoint: cbOutpoint, - SignatureScript: nil, - Sequence: wire.MaxTxInSequenceNum, - }} - txOuts := []*wire.TxOut{{ - ScriptPubKey: OpTrueScript, - Value: uint64(1), - }} - tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts) - diff, err := fus.diffFromTx(tx, acceptingBlueScore) - if err != nil { - t.Errorf("diffFromTx: %v", err) - } - if !reflect.DeepEqual(diff.toAdd, utxoCollection{ - wire.Outpoint{TxID: *tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2), - }) { - t.Errorf("diff.toAdd doesn't have the expected values") - } - - if !reflect.DeepEqual(diff.toRemove, utxoCollection{ - wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1), - }) { - t.Errorf("diff.toRemove doesn't have the expected values") - } - - //Test that we get an error if we don't have the outpoint inside the utxo set - invalidTxIns := []*wire.TxIn{{ - PreviousOutpoint: wire.Outpoint{TxID: daghash.TxID{}, Index: 0}, - SignatureScript: nil, - Sequence: wire.MaxTxInSequenceNum, - }} - invalidTxOuts := []*wire.TxOut{{ - ScriptPubKey: OpTrueScript, - Value: uint64(1), - }} - invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts) - _, err = fus.diffFromTx(invalidTx, acceptingBlueScore) - if err == nil { - t.Errorf("diffFromTx: expected an error but got ") - } - - //Test that we get an error if the outpoint is inside diffUTXOSet's toRemove - diff2 := &UTXODiff{ - toAdd: utxoCollection{}, - toRemove: utxoCollection{}, - } - dus := NewDiffUTXOSet(fus, diff2) - if isAccepted, err := dus.AddTx(tx, 2); err != nil { - t.Fatalf("AddTx unexpectedly failed. 
Error: %s", err) - } else if !isAccepted { - t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.TxID()) - } - _, err = dus.diffFromTx(tx, acceptingBlueScore) - if err == nil { - t.Errorf("diffFromTx: expected an error but got ") - } -} - // collection returns a collection of all UTXOs in this set func (fus *FullUTXOSet) collection() utxoCollection { return fus.utxoCollection.clone() diff --git a/blockdag/validate.go b/blockdag/validate.go index 7e1154485..a42dad058 100644 --- a/blockdag/validate.go +++ b/blockdag/validate.go @@ -545,6 +545,20 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t existingTxIDs[*id] = struct{}{} } + // Check for double spends with transactions on the same block. + usedOutpoints := make(map[wire.Outpoint]*daghash.TxID) + for _, tx := range transactions { + for _, txIn := range tx.MsgTx().TxIn { + if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists { + str := fmt.Sprintf("transaction %s spends "+ + "outpoint %s that was already spent by "+ + "transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID) + return 0, ruleError(ErrDoubleSpendInSameBlock, str) + } + usedOutpoints[txIn.PreviousOutpoint] = tx.ID() + } + } + return delay, nil } @@ -838,6 +852,11 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet, return nil, err } + err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions) + if err != nil { + return nil, err + } + if err := validateBlockMass(pastUTXO, transactions); err != nil { return nil, err } @@ -913,7 +932,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet, // Now that the inexpensive checks are done and have passed, verify the // transactions are actually allowed to spend the coins by running the - // expensive ECDSA signature check scripts. Doing this last helps + // expensive SCHNORR signature check scripts. Doing this last helps // prevent CPU exhaustion attacks. 
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache) if err != nil { diff --git a/mining/test_utils.go b/mining/test_utils.go index 132c38587..bbca4d83c 100644 --- a/mining/test_utils.go +++ b/mining/test_utils.go @@ -104,7 +104,7 @@ func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, paren } template.Block.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(utilTxs).Root() - ms, err := dag.NextBlockMultiset(utilTxs) + ms, err := dag.NextBlockMultiset() if err != nil { return nil, err } From 6463a4b5d022d7490cfb1d15c34deb5e8759741f Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 20 May 2020 14:38:24 +0300 Subject: [PATCH 35/77] [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) --- server/rpc/handle_get_block_template.go | 34 ++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/server/rpc/handle_get_block_template.go b/server/rpc/handle_get_block_template.go index 20385d382..a890ae730 100644 --- a/server/rpc/handle_get_block_template.go +++ b/server/rpc/handle_get_block_template.go @@ -72,7 +72,6 @@ type gbtWorkState struct { template *mining.BlockTemplate notifyMap map[string]map[int64]chan struct{} timeSource blockdag.TimeSource - isSynced bool } // newGbtWorkState returns a new instance of a gbtWorkState with all internal @@ -194,7 +193,7 @@ func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil { return nil, err } - return state.blockTemplateResult(s.cfg.DAG, useCoinbaseValue) + return state.blockTemplateResult(s, useCoinbaseValue) } // handleGetBlockTemplateLongPoll is a helper for handleGetBlockTemplateRequest @@ -239,7 +238,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal // Include whether or not it is valid to submit work against the old // block template depending on whether or not a solution has already // been found and added to the block DAG. - result, err = state.blockTemplateResult(s.cfg.DAG, useCoinbaseValue) + result, err = state.blockTemplateResult(s, useCoinbaseValue) if err != nil { return nil, err } @@ -268,7 +267,7 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue // the caller is invalid. parentHashes, lastGenerated, err := decodeLongPollID(longPollID) if err != nil { - result, err := state.blockTemplateResult(s.cfg.DAG, useCoinbaseValue) + result, err := state.blockTemplateResult(s, useCoinbaseValue) if err != nil { return nil, nil, err } @@ -286,7 +285,7 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue // Include whether or not it is valid to submit work against the // old block template depending on whether or not a solution has // already been found and added to the block DAG. - result, err := state.blockTemplateResult(s.cfg.DAG, useCoinbaseValue) + result, err := state.blockTemplateResult(s, useCoinbaseValue) if err != nil { return nil, nil, err } @@ -628,15 +627,6 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) // consensus rules. minTimestamp := s.cfg.DAG.NextBlockMinimumTime() - // Check whether this node is synced with the rest of of the - // network. There's almost never a good reason to mine on top - // of an unsynced DAG, and miners are generally expected not to - // mine when isSynced is false. 
- // This is not a straight-up error because the choice of whether
- // to mine or not is the responsibility of the miner rather
- // than the node's.
- isSynced := s.cfg.SyncMgr.IsSynced()
-
 // Update work state to ensure another block template isn't
 // generated until needed.
 state.template = template
@@ -644,7 +634,6 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)
 state.lastTxUpdate = lastTxUpdate
 state.tipHashes = tipHashes
 state.minTimestamp = minTimestamp
- state.isSynced = isSynced
 
 log.Debugf("Generated block template (timestamp %s, "+
 "target %s, merkle root %s)",
@@ -711,7 +700,8 @@
 // and returned to the caller.
 //
 // This function MUST be called with the state locked.
-func (state *gbtWorkState) blockTemplateResult(dag *blockdag.BlockDAG, useCoinbaseValue bool) (*rpcmodel.GetBlockTemplateResult, error) {
+func (state *gbtWorkState) blockTemplateResult(s *Server, useCoinbaseValue bool) (*rpcmodel.GetBlockTemplateResult, error) {
+ dag := s.cfg.DAG
 // Ensure the timestamps are still in valid range for the template.
 // This should really only ever happen if the local clock is changed
 // after the template is generated, but it's important to avoid serving
@@ -786,6 +776,16 @@ func (state *gbtWorkState) blockTemplateResult(dag *blockdag.BlockDAG, useCoinba
 // Omitting CoinbaseTxn -> coinbase, generation
 targetDifficulty := fmt.Sprintf("%064x", util.CompactToBig(header.Bits))
 longPollID := encodeLongPollID(state.tipHashes, state.lastGenerated)
+
+ // Check whether this node is synced with the rest of the
+ // network. There's almost never a good reason to mine on top
+ // of an unsynced DAG, and miners are generally expected not to
+ // mine when isSynced is false.
+ // This is not a straight-up error because the choice of whether
+ // to mine or not is the responsibility of the miner rather
+ // than the node's.
+ isSynced := s.cfg.SyncMgr.IsSynced()
+
 reply := rpcmodel.GetBlockTemplateResult{
 Bits: strconv.FormatInt(int64(header.Bits), 16),
 CurTime: header.Timestamp.Unix(),
@@ -803,7 +803,7 @@ func (state *gbtWorkState) blockTemplateResult(dag *blockdag.BlockDAG, useCoinba
 Mutable: gbtMutableFields,
 NonceRange: gbtNonceRange,
 Capabilities: gbtCapabilities,
- IsSynced: state.isSynced,
+ IsSynced: isSynced,
 }
 
 if useCoinbaseValue {

From 96052ac69a642dac1c016d61777dae9b21e02553 Mon Sep 17 00:00:00 2001
From: Ori Newman
Date: Sun, 24 May 2020 16:59:37 +0300
Subject: [PATCH 36/77] [NOD-809] Change fee rate to fee per megagram (#730)

---
 mempool/mempool.go | 12 ++++++++----
 mining/mining.go | 4 ++--
 mining/txselection.go | 4 ++--
 server/p2p/p2p.go | 2 +-
 4 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/mempool/mempool.go b/mempool/mempool.go
index 15ed66768..9a3395629 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -668,12 +668,16 @@ func (mp *TxPool) RemoveDoubleSpends(tx *util.Tx) {
 func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*wire.Outpoint) (*TxDesc, error) {
 // Add the transaction to the pool and mark the referenced outpoints
 // as spent by the pool.
+ mass, err := blockdag.CalcTxMassFromUTXOSet(tx, mp.mpUTXOSet) + if err != nil { + return nil, err + } txD := &TxDesc{ TxDesc: mining.TxDesc{ - Tx: tx, - Added: time.Now(), - Fee: fee, - FeePerKB: fee * 1000 / uint64(tx.MsgTx().SerializeSize()), + Tx: tx, + Added: time.Now(), + Fee: fee, + FeePerMegaGram: fee * 1e6 / mass, }, depCount: len(parentsInPool), } diff --git a/mining/mining.go b/mining/mining.go index 157b261d1..e193f261c 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -35,8 +35,8 @@ type TxDesc struct { // Fee is the total fee the transaction associated with the entry pays. Fee uint64 - // FeePerKB is the fee the transaction pays in sompi per 1000 bytes. - FeePerKB uint64 + // FeePerMegaGram is the fee the transaction pays in sompi per million gram. + FeePerMegaGram uint64 } // TxSource represents a source of transactions to consider for inclusion in diff --git a/mining/txselection.go b/mining/txselection.go index a4d068806..67bbf4721 100644 --- a/mining/txselection.go +++ b/mining/txselection.go @@ -301,8 +301,8 @@ func (g *BlkTmplGenerator) populateTemplateFromCandidates(candidateTxs []*candid txsForBlockTemplate.totalMass += selectedTx.txMass txsForBlockTemplate.totalFees += selectedTx.txDesc.Fee - log.Tracef("Adding tx %s (feePerKB %d)", - tx.ID(), selectedTx.txDesc.FeePerKB) + log.Tracef("Adding tx %s (feePerMegaGram %d)", + tx.ID(), selectedTx.txDesc.FeePerMegaGram) markCandidateTxForDeletion(selectedTx) } diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 4bf5cb03e..5892fb5b9 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -725,7 +725,7 @@ func (s *Server) handleRelayInvMsg(state *peerState, msg relayMsg) { // Don't relay the transaction if the transaction fee-per-kb // is less than the peer's feefilter. feeFilter := uint64(atomic.LoadInt64(&sp.FeeFilterInt)) - if feeFilter > 0 && txD.FeePerKB < feeFilter { + if feeFilter > 0 && txD.FeePerMegaGram < feeFilter { return true } From 3a4571d6717f54dcfcccd3a6394dd70895ac41ae Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 25 May 2020 12:51:30 +0300 Subject: [PATCH 37/77] [NOD-965] Make LookupNode return boolean (#729) * [NOD-965] Make dag.index.LookupNode return false if node is not found * [NOD-965] Rename blockDAG->dag * [NOD-965] Remove irrelevant test * [NOD-965] Use bi.index's ok in LookupNode --- blockdag/accept.go | 8 ++-- blockdag/accept_test.go | 5 ++- blockdag/blockindex.go | 6 +-- blockdag/blocklocator.go | 15 +++++-- blockdag/blockwindow_test.go | 5 ++- blockdag/common_test.go | 4 +- blockdag/dag.go | 71 +++++++++++++++++++--------------- blockdag/dag_test.go | 31 +++++++++------ blockdag/dagio.go | 35 +++++++++++------ blockdag/difficulty_test.go | 6 ++- blockdag/ghostdag_test.go | 21 ++++++++-- blockdag/process_test.go | 4 +- blockdag/reachabilitystore.go | 9 +++-- blockdag/test_utils.go | 4 +- blockdag/utxodiffstore_test.go | 10 ++--- blockdag/utxoio.go | 7 +++- blockdag/validate_test.go | 6 --- 17 files changed, 153 insertions(+), 94 deletions(-) diff --git a/blockdag/accept.go b/blockdag/accept.go index 238a97d68..9a81c0311 100644 --- a/blockdag/accept.go +++ b/blockdag/accept.go @@ -133,17 +133,17 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er return nil } -func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) { +func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) { header := block.MsgBlock().Header parentHashes := header.ParentHashes nodes := newBlockSet() for _, parentHash := range 
parentHashes { - node := blockDAG.index.LookupNode(parentHash) - if node == nil { + node, ok := dag.index.LookupNode(parentHash) + if !ok { str := fmt.Sprintf("parent block %s is unknown", parentHash) return nil, ruleError(ErrParentBlockUnknown, str) - } else if blockDAG.index.NodeStatus(node).KnownInvalid() { + } else if dag.index.NodeStatus(node).KnownInvalid() { str := fmt.Sprintf("parent block %s is known to be invalid", parentHash) return nil, ruleError(ErrInvalidAncestorBlock, str) } diff --git a/blockdag/accept_test.go b/blockdag/accept_test.go index c0beafbf9..994c31f9a 100644 --- a/blockdag/accept_test.go +++ b/blockdag/accept_test.go @@ -63,7 +63,10 @@ func TestMaybeAcceptBlockErrors(t *testing.T) { if isOrphan { t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan") } - blockNode1 := dag.index.LookupNode(block1.Hash()) + blockNode1, ok := dag.index.LookupNode(block1.Hash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", block1.Hash()) + } dag.index.SetStatusFlags(blockNode1, statusValidateFailed) block2 := blocks[2] diff --git a/blockdag/blockindex.go b/blockdag/blockindex.go index 2ed0aa6b7..26f1a4340 100644 --- a/blockdag/blockindex.go +++ b/blockdag/blockindex.go @@ -50,11 +50,11 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool { // return nil if there is no entry for the hash. // // This function is safe for concurrent access. -func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode { +func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) { bi.RLock() defer bi.RUnlock() - node := bi.index[*hash] - return node + node, ok := bi.index[*hash] + return node, ok } // AddNode adds the provided node to the block index and marks it as dirty. diff --git a/blockdag/blocklocator.go b/blockdag/blocklocator.go index ef0d9b1ab..5015a7dc2 100644 --- a/blockdag/blocklocator.go +++ b/blockdag/blocklocator.go @@ -29,8 +29,15 @@ func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (Bl dag.dagLock.RLock() defer dag.dagLock.RUnlock() - highNode := dag.index.LookupNode(highHash) - lowNode := dag.index.LookupNode(lowHash) + highNode, ok := dag.index.LookupNode(highHash) + if !ok { + return nil, errors.Errorf("block %s is unknown", highHash) + } + + lowNode, ok := dag.index.LookupNode(lowHash) + if !ok { + return nil, errors.Errorf("block %s is unknown", lowHash) + } return dag.blockLocator(highNode, lowNode) } @@ -88,8 +95,8 @@ func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash, lowNode := dag.genesis nextBlockLocatorIndex := int64(len(locator) - 1) for i, hash := range locator { - node := dag.index.LookupNode(hash) - if node != nil { + node, ok := dag.index.LookupNode(hash) + if ok { lowNode = node nextBlockLocatorIndex = int64(i) - 1 break diff --git a/blockdag/blockwindow_test.go b/blockdag/blockwindow_test.go index 939b84de5..de5d8d461 100644 --- a/blockdag/blockwindow_test.go +++ b/blockdag/blockwindow_test.go @@ -133,7 +133,10 @@ func TestBlueBlockWindow(t *testing.T) { t.Fatalf("block %v was unexpectedly orphan", blockData.id) } - node := dag.index.LookupNode(utilBlock.Hash()) + node, ok := dag.index.LookupNode(utilBlock.Hash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash()) + } blockByIDMap[blockData.id] = node idByBlockMap[node] = blockData.id diff --git a/blockdag/common_test.go b/blockdag/common_test.go index ca616d3b1..5079b33f1 100644 --- a/blockdag/common_test.go +++ b/blockdag/common_test.go @@ -178,8 +178,8 @@ func 
prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parent } func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode { - node := dag.index.LookupNode(block.BlockHash()) - if node == nil { + node, ok := dag.index.LookupNode(block.BlockHash()) + if !ok { t.Fatalf("couldn't find block node with hash %s", block.BlockHash()) } return node diff --git a/blockdag/dag.go b/blockdag/dag.go index 35c9d5a95..df925c962 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -209,8 +209,8 @@ func (dag *BlockDAG) IsKnownOrphan(hash *daghash.Hash) bool { // // This function is safe for concurrent access. func (dag *BlockDAG) IsKnownInvalid(hash *daghash.Hash) bool { - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { return false } return dag.index.NodeStatus(node).KnownInvalid() @@ -902,8 +902,8 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { // updated in a separate goroutine. To get a definite answer if a block // is finalized or not, use dag.checkFinalityRules. func (dag *BlockDAG) IsKnownFinalizedBlock(blockHash *daghash.Hash) bool { - node := dag.index.LookupNode(blockHash) - return node != nil && node.isFinalized + node, ok := dag.index.LookupNode(blockHash) + return ok && node.isFinalized } // NextBlockCoinbaseTransaction prepares the coinbase transaction for the next mined block @@ -951,8 +951,8 @@ func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error) // // This function MUST be called with the DAG read-lock held func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlockTxsAcceptanceData, error) { - node := dag.index.LookupNode(blockHash) - if node == nil { + node, ok := dag.index.LookupNode(blockHash) + if !ok { return nil, errors.Errorf("Couldn't find block %s", blockHash) } _, _, txsAcceptanceData, err := dag.pastUTXO(node) @@ -1407,8 +1407,8 @@ func (dag *BlockDAG) GetUTXOEntry(outpoint wire.Outpoint) (*UTXOEntry, bool) { // BlueScoreByBlockHash returns the blue score of a block with the given hash. func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) { - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { return 0, errors.Errorf("block %s is unknown", hash) } @@ -1417,8 +1417,8 @@ func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) { // BluesByBlockHash returns the blues of the block for the given hash. 
func (dag *BlockDAG) BluesByBlockHash(hash *daghash.Hash) ([]*daghash.Hash, error) { - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { return nil, errors.Errorf("block %s is unknown", hash) } @@ -1449,8 +1449,8 @@ func (dag *BlockDAG) BlockConfirmationsByHashNoLock(hash *daghash.Hash) (uint64, return 0, nil } - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { return 0, errors.Errorf("block %s is unknown", hash) } @@ -1565,8 +1565,8 @@ func (dag *BlockDAG) oldestChainBlockWithBlueScoreGreaterThan(blueScore uint64) // // This method MUST be called with the DAG lock held func (dag *BlockDAG) IsInSelectedParentChain(blockHash *daghash.Hash) (bool, error) { - blockNode := dag.index.LookupNode(blockHash) - if blockNode == nil { + blockNode, ok := dag.index.LookupNode(blockHash) + if !ok { str := fmt.Sprintf("block %s is not in the DAG", blockHash) return false, errNotInDAG(str) } @@ -1598,7 +1598,10 @@ func (dag *BlockDAG) SelectedParentChain(blockHash *daghash.Hash) ([]*daghash.Ha for !isBlockInSelectedParentChain { removedChainHashes = append(removedChainHashes, blockHash) - node := dag.index.LookupNode(blockHash) + node, ok := dag.index.LookupNode(blockHash) + if !ok { + return nil, nil, errors.Errorf("block %s does not exist in the DAG", blockHash) + } blockHash = node.selectedParent.hash isBlockInSelectedParentChain, err = dag.IsInSelectedParentChain(blockHash) @@ -1661,8 +1664,8 @@ func (dag *BlockDAG) CurrentBits() uint32 { // HeaderByHash returns the block header identified by the given hash or an // error if it doesn't exist. func (dag *BlockDAG) HeaderByHash(hash *daghash.Hash) (*wire.BlockHeader, error) { - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { err := errors.Errorf("block %s is not known", hash) return &wire.BlockHeader{}, err } @@ -1675,8 +1678,8 @@ func (dag *BlockDAG) HeaderByHash(hash *daghash.Hash) (*wire.BlockHeader, error) // // This function is safe for concurrent access. func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, error) { - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { str := fmt.Sprintf("block %s is not in the DAG", hash) return nil, errNotInDAG(str) @@ -1690,8 +1693,8 @@ func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, err // // This function is safe for concurrent access. func (dag *BlockDAG) SelectedParentHash(blockHash *daghash.Hash) (*daghash.Hash, error) { - node := dag.index.LookupNode(blockHash) - if node == nil { + node, ok := dag.index.LookupNode(blockHash) + if !ok { str := fmt.Sprintf("block %s is not in the DAG", blockHash) return nil, errNotInDAG(str) @@ -1725,12 +1728,12 @@ func (dag *BlockDAG) antiPastHashesBetween(lowHash, highHash *daghash.Hash, maxH // // This function MUST be called with the DAG state lock held (for reads). 
func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) {
- lowNode := dag.index.LookupNode(lowHash)
- if lowNode == nil {
+ lowNode, ok := dag.index.LookupNode(lowHash)
+ if !ok {
 return nil, errors.Errorf("Couldn't find low hash %s", lowHash)
 }
- highNode := dag.index.LookupNode(highHash)
- if highNode == nil {
+ highNode, ok := dag.index.LookupNode(highHash)
+ if !ok {
 return nil, errors.Errorf("Couldn't find high hash %s", highHash)
 }
 if lowNode.blueScore >= highNode.blueScore {
@@ -1825,8 +1828,9 @@ func (dag *BlockDAG) antiPastHeadersBetween(lowHash, highHash *daghash.Hash, max
 func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
 highNode := &dag.virtual.blockNode
 if highHash != nil {
- highNode = dag.index.LookupNode(highHash)
- if highNode == nil {
+ var ok bool
+ highNode, ok = dag.index.LookupNode(highHash)
+ if !ok {
 return nil, errors.Errorf("Couldn't find the high hash %s in the dag", highHash)
 }
 }
@@ -2061,9 +2065,9 @@ func New(config *Config) (*BlockDAG, error) {
 }
 }
- genesis := index.LookupNode(params.GenesisHash)
+ genesis, ok := index.LookupNode(params.GenesisHash)
- if genesis == nil {
+ if !ok {
 genesisBlock := util.NewBlock(dag.dagParams.GenesisBlock)
 // To prevent the creation of a new err variable unintentionally so the
 // defered function above could read err - declare isOrphan and isDelayed explicitly.
@@ -2073,12 +2077,15 @@ func New(config *Config) (*BlockDAG, error) {
 return nil, err
 }
 if isDelayed {
- return nil, errors.New("Genesis block shouldn't be in the future")
+ return nil, errors.New("genesis block shouldn't be in the future")
 }
 if isOrphan {
- return nil, errors.New("Genesis block is unexpectedly orphan")
+ return nil, errors.New("genesis block is unexpectedly orphan")
+ }
+ genesis, ok = index.LookupNode(params.GenesisHash)
+ if !ok {
+ return nil, errors.New("genesis is not found in the DAG after it was processed")
 }
- genesis = index.LookupNode(params.GenesisHash)
 }

 // Save a reference to the genesis block.
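
Every dag.go hunk above is the same mechanical transformation: LookupNode now reports absence through an explicit boolean instead of a nil sentinel, and each caller branches on it. A condensed sketch of the converted API follows; hash and blockNode are reduced stand-ins for daghash.Hash and the real blockNode, assuming a map-backed index guarded by a read-write mutex as in the blockindex.go hunk earlier in this patch:

package main

import (
	"fmt"
	"sync"
)

// hash stands in for daghash.Hash.
type hash [32]byte

// blockNode is a minimal stand-in for the full blockNode type.
type blockNode struct {
	hash hash
}

// blockIndex is a concurrency-safe map from block hash to block node.
type blockIndex struct {
	sync.RWMutex
	index map[hash]*blockNode
}

// LookupNode follows the comma-ok convention this patch adopts: absence
// is reported through the boolean rather than through a nil *blockNode.
func (bi *blockIndex) LookupNode(h hash) (*blockNode, bool) {
	bi.RLock()
	defer bi.RUnlock()
	node, ok := bi.index[h]
	return node, ok
}

func main() {
	bi := &blockIndex{index: make(map[hash]*blockNode)}

	var genesisHash hash // an all-zero hash, purely illustrative
	bi.index[genesisHash] = &blockNode{hash: genesisHash}

	// Callers can no longer dereference a missing node by accident: the
	// compiler forces them to receive the second return value.
	if node, ok := bi.LookupNode(genesisHash); ok {
		fmt.Printf("found node %x\n", node.hash[:4])
	}
}

The payoff is visible in the New function above: the previous code could silently carry a nil genesis forward, while the comma-ok form turns every missed lookup into an immediate, descriptive error.
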
diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index a1bbee2b0..3f73fa8a7 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -621,7 +621,10 @@ func TestAcceptingInInit(t *testing.T) { testBlock := blocks[1] // Create a test blockNode with an unvalidated status - genesisNode := dag.index.LookupNode(genesisBlock.Hash()) + genesisNode, ok := dag.index.LookupNode(genesisBlock.Hash()) + if !ok { + t.Fatalf("genesis block does not exist in the DAG") + } testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, blockSetFromSlice(genesisNode)) testNode.status = statusDataStored @@ -659,7 +662,11 @@ func TestAcceptingInInit(t *testing.T) { } // Make sure that the test node's status is valid - testNode = dag.index.LookupNode(testBlock.Hash()) + testNode, ok = dag.index.LookupNode(testBlock.Hash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", testBlock.Hash()) + } + if testNode.status&statusValid == 0 { t.Fatalf("testNode is unexpectedly invalid") } @@ -1045,8 +1052,8 @@ func TestDAGIndexFailedStatus(t *testing.T) { "is an orphan\n") } - invalidBlockNode := dag.index.LookupNode(invalidBlock.Hash()) - if invalidBlockNode == nil { + invalidBlockNode, ok := dag.index.LookupNode(invalidBlock.Hash()) + if !ok { t.Fatalf("invalidBlockNode wasn't added to the block index as expected") } if invalidBlockNode.status&statusValidateFailed != statusValidateFailed { @@ -1074,8 +1081,8 @@ func TestDAGIndexFailedStatus(t *testing.T) { t.Fatalf("ProcessBlock incorrectly returned invalidBlockChild " + "is an orphan\n") } - invalidBlockChildNode := dag.index.LookupNode(invalidBlockChild.Hash()) - if invalidBlockChildNode == nil { + invalidBlockChildNode, ok := dag.index.LookupNode(invalidBlockChild.Hash()) + if !ok { t.Fatalf("invalidBlockChild wasn't added to the block index as expected") } if invalidBlockChildNode.status&statusInvalidAncestor != statusInvalidAncestor { @@ -1102,8 +1109,8 @@ func TestDAGIndexFailedStatus(t *testing.T) { t.Fatalf("ProcessBlock incorrectly returned invalidBlockGrandChild " + "is an orphan\n") } - invalidBlockGrandChildNode := dag.index.LookupNode(invalidBlockGrandChild.Hash()) - if invalidBlockGrandChildNode == nil { + invalidBlockGrandChildNode, ok := dag.index.LookupNode(invalidBlockGrandChild.Hash()) + if !ok { t.Fatalf("invalidBlockGrandChild wasn't added to the block index as expected") } if invalidBlockGrandChildNode.status&statusInvalidAncestor != statusInvalidAncestor { @@ -1312,8 +1319,8 @@ func TestUTXOCommitment(t *testing.T) { blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs) // Get the pastUTXO of blockD - blockNodeD := dag.index.LookupNode(blockD.BlockHash()) - if blockNodeD == nil { + blockNodeD, ok := dag.index.LookupNode(blockD.BlockHash()) + if !ok { t.Fatalf("TestUTXOCommitment: blockNode for block D not found") } blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD) @@ -1372,8 +1379,8 @@ func TestPastUTXOMultiSet(t *testing.T) { blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil) // Take blockC's selectedParentMultiset - blockNodeC := dag.index.LookupNode(blockC.BlockHash()) - if blockNodeC == nil { + blockNodeC, ok := dag.index.LookupNode(blockC.BlockHash()) + if !ok { t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found") } blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag) diff --git a/blockdag/dagio.go b/blockdag/dagio.go index 730ff4a41..32c01877e 100644 --- a/blockdag/dagio.go +++ 
b/blockdag/dagio.go
@@ -233,7 +233,11 @@ func (dag *BlockDAG) initDAGState() error {
 }
 
 log.Debugf("Setting the last finality point...")
- dag.lastFinalityPoint = dag.index.LookupNode(dagState.LastFinalityPoint)
+ var ok bool
+ dag.lastFinalityPoint, ok = dag.index.LookupNode(dagState.LastFinalityPoint)
+ if !ok {
+ return errors.Errorf("block %s does not exist in the DAG", dagState.LastFinalityPoint)
+ }
 dag.finalizeNodesBelowFinalityPoint(false)
 
 log.Debugf("Processing unprocessed blockNodes...")
@@ -348,8 +352,8 @@ func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error
 func (dag *BlockDAG) initVirtualBlockTips(state *dagState) error {
 tips := newBlockSet()
 for _, tipHash := range state.TipHashes {
- tip := dag.index.LookupNode(tipHash)
- if tip == nil {
+ tip, ok := dag.index.LookupNode(tipHash)
+ if !ok {
 return errors.Errorf("cannot find "+
 "DAG tip %s in block index", state.TipHashes)
 }
@@ -426,8 +430,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
 
 node.parents = newBlockSet()
 for _, hash := range header.ParentHashes {
- parent := dag.index.LookupNode(hash)
- if parent == nil {
+ parent, ok := dag.index.LookupNode(hash)
+ if !ok {
 return nil, errors.Errorf("deserializeBlockNode: Could "+
 "not find parent %s for block %s", hash, header.BlockHash())
 }
@@ -447,7 +451,11 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
 
 // Because genesis doesn't have selected parent, it's serialized as zero hash
 if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
- node.selectedParent = dag.index.LookupNode(selectedParentHash)
+ var ok bool
+ node.selectedParent, ok = dag.index.LookupNode(selectedParentHash)
+ if !ok {
+ return nil, errors.Errorf("block %s does not exist in the DAG", selectedParentHash)
+ }
 }
 
 node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
@@ -466,7 +474,12 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
 if _, err := io.ReadFull(buffer, hash[:]); err != nil {
 return nil, err
 }
- node.blues[i] = dag.index.LookupNode(hash)
+
+ var ok bool
+ node.blues[i], ok = dag.index.LookupNode(hash)
+ if !ok {
+ return nil, errors.Errorf("block %s does not exist in the DAG", hash)
+ }
 }
 
 bluesAnticoneSizesLen, err := wire.ReadVarInt(buffer)
@@ -484,8 +497,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
 if err != nil {
 return nil, err
 }
- blue := dag.index.LookupNode(hash)
- if blue == nil {
+ blue, ok := dag.index.LookupNode(hash)
+ if !ok {
 return nil, errors.Errorf("couldn't find block with hash %s", hash)
 }
 node.bluesAnticoneSizes[blue] = dagconfig.KType(bluesAnticoneSize)
@@ -590,8 +603,8 @@ func blockHashFromBlockIndexKey(BlockIndexKey []byte) (*daghash.Hash, error) {
 // This function is safe for concurrent access. 
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) { // Lookup the block hash in block index and ensure it is in the DAG - node := dag.index.LookupNode(hash) - if node == nil { + node, ok := dag.index.LookupNode(hash) + if !ok { str := fmt.Sprintf("block %s is not in the DAG", hash) return nil, errNotInDAG(str) } diff --git a/blockdag/difficulty_test.go b/blockdag/difficulty_test.go index 8a30892c2..a46791388 100644 --- a/blockdag/difficulty_test.go +++ b/blockdag/difficulty_test.go @@ -114,7 +114,11 @@ func TestDifficulty(t *testing.T) { if isOrphan { t.Fatalf("block was unexpectedly orphan") } - return dag.index.LookupNode(block.BlockHash()) + node, ok := dag.index.LookupNode(block.BlockHash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", block.BlockHash()) + } + return node } tip := dag.genesis for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ { diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 985ffabd0..be0c1b563 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -215,7 +215,10 @@ func TestGHOSTDAG(t *testing.T) { t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id) } - node := dag.index.LookupNode(utilBlock.Hash()) + node, ok := dag.index.LookupNode(utilBlock.Hash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash()) + } blockByIDMap[blockData.id] = node idByBlockMap[node] = blockData.id @@ -305,8 +308,15 @@ func TestBlueAnticoneSizeErrors(t *testing.T) { } // Get references to the tips of the two chains - blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash()) - blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash()) + blockNodeA, ok := dag.index.LookupNode(currentBlockA.BlockHash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", currentBlockA.BlockHash()) + } + + blockNodeB, ok := dag.index.LookupNode(currentBlockB.BlockHash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", currentBlockB.BlockHash()) + } // Try getting the blueAnticoneSize between them. Since the two // blocks are not in the anticones of eachother, this should fail. @@ -359,7 +369,10 @@ func TestGHOSTDAGErrors(t *testing.T) { // Try to rerun GHOSTDAG on the last block. GHOSTDAG uses // reachability data, so we expect it to fail. 
- blockNode3 := dag.index.LookupNode(block3.BlockHash()) + blockNode3, ok := dag.index.LookupNode(block3.BlockHash()) + if !ok { + t.Fatalf("block %s does not exist in the DAG", block3.BlockHash()) + } _, err = dag.ghostdag(blockNode3) if err == nil { t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded") diff --git a/blockdag/process_test.go b/blockdag/process_test.go index 53ee3c049..58dafce16 100644 --- a/blockdag/process_test.go +++ b/blockdag/process_test.go @@ -63,8 +63,8 @@ func TestProcessOrphans(t *testing.T) { } // Make sure that the child block had been rejected - node := dag.index.LookupNode(childBlock.Hash()) - if node == nil { + node, ok := dag.index.LookupNode(childBlock.Hash()) + if !ok { t.Fatalf("TestProcessOrphans: child block missing from block index") } if !dag.index.NodeStatus(node).KnownInvalid() { diff --git a/blockdag/reachabilitystore.go b/blockdag/reachabilitystore.go index beeccb44d..dea254784 100644 --- a/blockdag/reachabilitystore.go +++ b/blockdag/reachabilitystore.go @@ -176,7 +176,10 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C } // Connect the treeNode with its blockNode - reachabilityData.treeNode.blockNode = store.dag.index.LookupNode(hash) + reachabilityData.treeNode.blockNode, ok = store.dag.index.LookupNode(hash) + if !ok { + return errors.Errorf("block %s does not exist in the DAG", hash) + } return nil } @@ -392,8 +395,8 @@ func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destin if err != nil { return err } - blockNode := store.dag.index.LookupNode(blockHash) - if blockNode == nil { + blockNode, ok := store.dag.index.LookupNode(blockHash) + if !ok { return errors.Errorf("blockNode not found for hash %s", blockHash) } blockReachabilityData, ok := store.reachabilityDataByHash(blockHash) diff --git a/blockdag/test_utils.go b/blockdag/test_utils.go index 012194d97..f4307598e 100644 --- a/blockdag/test_utils.go +++ b/blockdag/test_utils.go @@ -146,8 +146,8 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest { func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) { parents := newBlockSet() for _, hash := range parentHashes { - parent := dag.index.LookupNode(hash) - if parent == nil { + parent, ok := dag.index.LookupNode(hash) + if !ok { return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash) } parents.add(parent) diff --git a/blockdag/utxodiffstore_test.go b/blockdag/utxodiffstore_test.go index d7c6d89d7..3727080a5 100644 --- a/blockdag/utxodiffstore_test.go +++ b/blockdag/utxodiffstore_test.go @@ -114,8 +114,8 @@ func TestClearOldEntries(t *testing.T) { for i := 0; i < 10; i++ { processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil) - node := dag.index.LookupNode(processedBlock.BlockHash()) - if node == nil { + node, ok := dag.index.LookupNode(processedBlock.BlockHash()) + if !ok { t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash()) } blockNodes[i] = node @@ -144,13 +144,13 @@ func TestClearOldEntries(t *testing.T) { // Add a block on top of the genesis to force the retrieval of all diffData processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) - node := dag.index.LookupNode(processedBlock.BlockHash()) - if node == nil { + node, ok := dag.index.LookupNode(processedBlock.BlockHash()) + if !ok { t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", 
processedBlock.BlockHash()) } // Make sure that the child-of-genesis node isn't in the loaded set - _, ok := dag.utxoDiffStore.loaded[node] + _, ok = dag.utxoDiffStore.loaded[node] if ok { t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash) } diff --git a/blockdag/utxoio.go b/blockdag/utxoio.go index 6af7f386f..c7bb474c9 100644 --- a/blockdag/utxoio.go +++ b/blockdag/utxoio.go @@ -52,7 +52,12 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData if err != nil { return nil, err } - diffData.diffChild = diffStore.dag.index.LookupNode(hash) + + var ok bool + diffData.diffChild, ok = diffStore.dag.index.LookupNode(hash) + if !ok { + return nil, errors.Errorf("block %s does not exist in the DAG", hash) + } } diffData.diff, err = deserializeUTXODiff(r) diff --git a/blockdag/validate_test.go b/blockdag/validate_test.go index 9b8a993bc..ec2f7554e 100644 --- a/blockdag/validate_test.go +++ b/blockdag/validate_test.go @@ -125,12 +125,6 @@ func TestCheckConnectBlockTemplate(t *testing.T) { "block 4: %v", err) } - blockNode3 := dag.index.LookupNode(blocks[3].Hash()) - blockNode4 := dag.index.LookupNode(blocks[4].Hash()) - if blockNode3.children.contains(blockNode4) { - t.Errorf("Block 4 wasn't successfully detached as a child from block3") - } - // Block 3a should connect even though it does not build on dag tips. err = dag.CheckConnectBlockTemplateNoLock(blocks[5]) if err != nil { From 6219b934300b652bb26a4d490c5c3cb95d9bf7c0 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 25 May 2020 14:30:43 +0300 Subject: [PATCH 38/77] [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732) * [NOD-1018] Exit after 2 minutes if graceful shutdown fails * [NOD-1018] Change time.Tick to time.After --- kaspad.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/kaspad.go b/kaspad.go index 7b3db8260..7293f0036 100644 --- a/kaspad.go +++ b/kaspad.go @@ -13,6 +13,7 @@ import ( "runtime/debug" "runtime/pprof" "strings" + "time" "github.com/kaspanet/kaspad/dbaccess" @@ -145,7 +146,20 @@ func kaspadMain(serverChan chan<- *server.Server) error { defer func() { kasdLog.Infof("Gracefully shutting down the server...") server.Stop() - server.WaitForShutdown() + + shutdownDone := make(chan struct{}) + go func() { + server.WaitForShutdown() + shutdownDone <- struct{}{} + }() + + const shutdownTimeout = 2 * time.Minute + + select { + case <-shutdownDone: + case <-time.After(shutdownTimeout): + kasdLog.Criticalf("Graceful shutdown timed out %s. Terminating...", shutdownTimeout) + } srvrLog.Infof("Server shutdown complete") }() server.Start() From fc00275d9cb0107433fa31c1861a3109857caff9 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Wed, 27 May 2020 17:37:03 +0300 Subject: [PATCH 39/77] [NOD-553] Get rid of base58 (#735) * [NOD-553] Get rid of wif. * [NOD-553] Get rid of base58. 
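The shutdown path added by PATCH 38 above is worth calling out as a general Go idiom: run the blocking wait in its own goroutine and race it against a timer, so that a hung subsystem cannot stall process exit forever. The sketch below distills that idiom into a self-contained program; waitWithTimeout and the other names are illustrative stand-ins, not kaspad APIs. It closes the done channel rather than sending a value (equivalent here, since there is a single waiter), and it uses time.After for the same reason the commit message cites in switching away from time.Tick: a one-shot deadline wants a one-shot timer, not a repeating ticker that is never stopped.

package main

import (
	"fmt"
	"time"
)

// waitWithTimeout runs the blocking wait function in its own goroutine and
// reports whether it finished before the timeout elapsed.
func waitWithTimeout(wait func(), timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wait()
		close(done) // signal completion; safe with a single waiter
	}()

	select {
	case <-done:
		return true
	case <-time.After(timeout): // one-shot timer, unlike time.Tick
		return false
	}
}

func main() {
	hangingShutdown := func() { time.Sleep(5 * time.Second) }
	if !waitWithTimeout(hangingShutdown, 2*time.Second) {
		fmt.Println("graceful shutdown timed out. Terminating...")
	}
}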
--- util/base58/README.md | 23 ----- util/base58/alphabet.go | 49 --------- util/base58/base58.go | 75 -------------- util/base58/base58_test.go | 97 ------------------ util/base58/base58bench_test.go | 34 ------- util/base58/base58check.go | 52 ---------- util/base58/base58check_test.go | 65 ------------ util/base58/doc.go | 19 ---- util/base58/example_test.go | 70 ------------- util/base58/genalphabet.go | 79 --------------- util/bloom/filter_test.go | 12 +-- util/wif.go | 170 -------------------------------- util/wif_test.go | 79 --------------- 13 files changed, 2 insertions(+), 822 deletions(-) delete mode 100644 util/base58/README.md delete mode 100644 util/base58/alphabet.go delete mode 100644 util/base58/base58.go delete mode 100644 util/base58/base58_test.go delete mode 100644 util/base58/base58bench_test.go delete mode 100644 util/base58/base58check.go delete mode 100644 util/base58/base58check_test.go delete mode 100644 util/base58/doc.go delete mode 100644 util/base58/example_test.go delete mode 100644 util/base58/genalphabet.go delete mode 100644 util/wif.go delete mode 100644 util/wif_test.go diff --git a/util/base58/README.md b/util/base58/README.md deleted file mode 100644 index 0635bfa41..000000000 --- a/util/base58/README.md +++ /dev/null @@ -1,23 +0,0 @@ -base58 -========== - -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/util/base58) - -Package base58 provides an API for encoding and decoding to and from the -modified base58 encoding. - -A comprehensive suite of tests is provided to ensure proper functionality. - - -## Examples - -* [Decode Example](http://godoc.org/github.com/kaspanet/kaspad/util/base58#example-Decode) - Demonstrates how to decode modified base58 encoded data. -* [Encode Example](http://godoc.org/github.com/kaspanet/kaspad/util/base58#example-Encode) - Demonstrates how to encode data using the modified base58 encoding scheme. -* [CheckDecode Example](http://godoc.org/github.com/kaspanet/kaspad/util/base58#example-CheckDecode) - Demonstrates how to decode Base58Check encoded data. -* [CheckEncode Example](http://godoc.org/github.com/kaspanet/kaspad/util/base58#example-CheckEncode) - Demonstrates how to encode data using the Base58Check encoding scheme. - diff --git a/util/base58/alphabet.go b/util/base58/alphabet.go deleted file mode 100644 index 6921dd821..000000000 --- a/util/base58/alphabet.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -// AUTOGENERATED by genalphabet.go; do not edit. - -package base58 - -const ( - // alphabet is the modified base58 alphabet used by kaspa. 
- alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" - - alphabetIdx0 = '1' -) - -var b58 = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 0, 1, 2, 3, 4, 5, 6, - 7, 8, 255, 255, 255, 255, 255, 255, - 255, 9, 10, 11, 12, 13, 14, 15, - 16, 255, 17, 18, 19, 20, 21, 255, - 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 255, 255, 255, 255, 255, - 255, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 255, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, -} diff --git a/util/base58/base58.go b/util/base58/base58.go deleted file mode 100644 index 19a72de2c..000000000 --- a/util/base58/base58.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2013-2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package base58 - -import ( - "math/big" -) - -//go:generate go run genalphabet.go - -var bigRadix = big.NewInt(58) -var bigZero = big.NewInt(0) - -// Decode decodes a modified base58 string to a byte slice. -func Decode(b string) []byte { - answer := big.NewInt(0) - j := big.NewInt(1) - - scratch := new(big.Int) - for i := len(b) - 1; i >= 0; i-- { - tmp := b58[b[i]] - if tmp == 255 { - return []byte("") - } - scratch.SetInt64(int64(tmp)) - scratch.Mul(j, scratch) - answer.Add(answer, scratch) - j.Mul(j, bigRadix) - } - - tmpval := answer.Bytes() - - var numZeros int - for numZeros = 0; numZeros < len(b); numZeros++ { - if b[numZeros] != alphabetIdx0 { - break - } - } - flen := numZeros + len(tmpval) - val := make([]byte, flen) - copy(val[numZeros:], tmpval) - - return val -} - -// Encode encodes a byte slice to a modified base58 string. -func Encode(b []byte) string { - x := new(big.Int) - x.SetBytes(b) - - answer := make([]byte, 0, len(b)*136/100) - for x.Cmp(bigZero) > 0 { - mod := new(big.Int) - x.DivMod(x, bigRadix, mod) - answer = append(answer, alphabet[mod.Int64()]) - } - - // leading zero bytes - for _, i := range b { - if i != 0 { - break - } - answer = append(answer, alphabetIdx0) - } - - // reverse - alen := len(answer) - for i := 0; i < alen/2; i++ { - answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i] - } - - return string(answer) -} diff --git a/util/base58/base58_test.go b/util/base58/base58_test.go deleted file mode 100644 index b657467db..000000000 --- a/util/base58/base58_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package base58_test - -import ( - "bytes" - "encoding/hex" - "github.com/kaspanet/kaspad/util/base58" - "testing" -) - -var stringTests = []struct { - in string - out string -}{ - {"", ""}, - {" ", "Z"}, - {"-", "n"}, - {"0", "q"}, - {"1", "r"}, - {"-1", "4SU"}, - {"11", "4k8"}, - {"abc", "ZiCa"}, - {"1234598760", "3mJr7AoUXx2Wqd"}, - {"abcdefghijklmnopqrstuvwxyz", "3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f"}, - {"00000000000000000000000000000000000000000000000000000000000000", "3sN2THZeE9Eh9eYrwkvZqNstbHGvrxSAM7gXUXvyFQP8XvQLUqNCS27icwUeDT7ckHm4FUHM2mTVh1vbLmk7y"}, -} - -var invalidStringTests = []struct { - in string - out string -}{ - {"0", ""}, - {"O", ""}, - {"I", ""}, - {"l", ""}, - {"3mJr0", ""}, - {"O3yxU", ""}, - {"3sNI", ""}, - {"4kl8", ""}, - {"0OIl", ""}, - {"!@#$%^&*()-_=+~`", ""}, -} - -var hexTests = []struct { - in string - out string -}{ - {"61", "2g"}, - {"626262", "a3gV"}, - {"636363", "aPEr"}, - {"73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"}, - {"00eb15231dfceb60925886b67d065299925915aeb172c06647", "1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L"}, - {"516b6fcd0f", "ABnLTmg"}, - {"bf4f89001e670274dd", "3SEo3LWLoPntC"}, - {"572e4794", "3EFU7m"}, - {"ecac89cad93923c02321", "EJDM8drfXA6uyA"}, - {"10c8511e", "Rt5zm"}, - {"00000000000000000000", "1111111111"}, -} - -func TestBase58(t *testing.T) { - // Encode tests - for x, test := range stringTests { - tmp := []byte(test.in) - if res := base58.Encode(tmp); res != test.out { - t.Errorf("Encode test #%d failed: got: %s want: %s", - x, res, test.out) - continue - } - } - - // Decode tests - for x, test := range hexTests { - b, err := hex.DecodeString(test.in) - if err != nil { - t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in) - continue - } - if res := base58.Decode(test.out); !bytes.Equal(res, b) { - t.Errorf("Decode test #%d failed: got: %q want: %q", - x, res, test.in) - continue - } - } - - // Decode with invalid input - for x, test := range invalidStringTests { - if res := base58.Decode(test.in); string(res) != test.out { - t.Errorf("Decode invalidString test #%d failed: got: %q want: %q", - x, res, test.out) - continue - } - } -} diff --git a/util/base58/base58bench_test.go b/util/base58/base58bench_test.go deleted file mode 100644 index d06888821..000000000 --- a/util/base58/base58bench_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2013-2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package base58_test - -import ( - "bytes" - "github.com/kaspanet/kaspad/util/base58" - "testing" -) - -func BenchmarkBase58Encode(b *testing.B) { - b.StopTimer() - data := bytes.Repeat([]byte{0xff}, 5000) - b.SetBytes(int64(len(data))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - base58.Encode(data) - } -} - -func BenchmarkBase58Decode(b *testing.B) { - b.StopTimer() - data := bytes.Repeat([]byte{0xff}, 5000) - encoded := base58.Encode(data) - b.SetBytes(int64(len(encoded))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - base58.Decode(encoded) - } -} diff --git a/util/base58/base58check.go b/util/base58/base58check.go deleted file mode 100644 index 62579b3cd..000000000 --- a/util/base58/base58check.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2013-2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package base58 - -import ( - "crypto/sha256" - "github.com/pkg/errors" -) - -// ErrChecksum indicates that the checksum of a check-encoded string does not verify against -// the checksum. -var ErrChecksum = errors.New("checksum error") - -// ErrInvalidFormat indicates that the check-encoded string has an invalid format. -var ErrInvalidFormat = errors.New("invalid format: version and/or checksum bytes missing") - -// checksum: first four bytes of sha256^2 -func checksum(input []byte) (cksum [4]byte) { - h := sha256.Sum256(input) - h2 := sha256.Sum256(h[:]) - copy(cksum[:], h2[:4]) - return -} - -// CheckEncode prepends a version byte and appends a four byte checksum. -func CheckEncode(input []byte, version byte) string { - b := make([]byte, 0, 1+len(input)+4) - b = append(b, version) - b = append(b, input[:]...) - cksum := checksum(b) - b = append(b, cksum[:]...) - return Encode(b) -} - -// CheckDecode decodes a string that was encoded with CheckEncode and verifies the checksum. -func CheckDecode(input string) (result []byte, version byte, err error) { - decoded := Decode(input) - if len(decoded) < 5 { - return nil, 0, ErrInvalidFormat - } - version = decoded[0] - var cksum [4]byte - copy(cksum[:], decoded[len(decoded)-4:]) - if checksum(decoded[:len(decoded)-4]) != cksum { - return nil, 0, ErrChecksum - } - payload := decoded[1 : len(decoded)-4] - result = append(result, payload...) - return -} diff --git a/util/base58/base58check_test.go b/util/base58/base58check_test.go deleted file mode 100644 index 6535a72cc..000000000 --- a/util/base58/base58check_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2013-2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package base58_test - -import ( - "github.com/kaspanet/kaspad/util/base58" - "testing" -) - -var checkEncodingStringTests = []struct { - version byte - in string - out string -}{ - {20, "", "3MNQE1X"}, - {20, " ", "B2Kr6dBE"}, - {20, "-", "B3jv1Aft"}, - {20, "0", "B482yuaX"}, - {20, "1", "B4CmeGAC"}, - {20, "-1", "mM7eUf6kB"}, - {20, "11", "mP7BMTDVH"}, - {20, "abc", "4QiVtDjUdeq"}, - {20, "1234598760", "ZmNb8uQn5zvnUohNCEPP"}, - {20, "abcdefghijklmnopqrstuvwxyz", "K2RYDcKfupxwXdWhSAxQPCeiULntKm63UXyx5MvEH2"}, - {20, "00000000000000000000000000000000000000000000000000000000000000", "bi1EWXwJay2udZVxLJozuTb8Meg4W9c6xnmJaRDjg6pri5MBAxb9XwrpQXbtnqEoRV5U2pixnFfwyXC8tRAVC8XxnjK"}, -} - -func TestBase58Check(t *testing.T) { - for x, test := range checkEncodingStringTests { - // test encoding - if res := base58.CheckEncode([]byte(test.in), test.version); res != test.out { - t.Errorf("CheckEncode test #%d failed: got %s, want: %s", x, res, test.out) - } - - // test decoding - res, version, err := base58.CheckDecode(test.out) - if err != nil { - t.Errorf("CheckDecode test #%d failed with err: %v", x, err) - } else if version != test.version { - t.Errorf("CheckDecode test #%d failed: got version: %d want: %d", x, version, test.version) - } else if string(res) != test.in { - t.Errorf("CheckDecode test #%d failed: got: %s want: %s", x, res, test.in) - } - } - - // test the two decoding failure cases - // case 1: checksum error - _, _, err := base58.CheckDecode("3MNQE1Y") - if err != base58.ErrChecksum { - t.Error("Checkdecode test failed, expected ErrChecksum") - } - // case 2: invalid formats (string lengths below 5 mean the version byte and/or the checksum - // bytes are missing). 
- testString := "" - for len := 0; len < 4; len++ { - // make a string of length `len` - _, _, err = base58.CheckDecode(testString) - if err != base58.ErrInvalidFormat { - t.Error("Checkdecode test failed, expected ErrInvalidFormat") - } - } - -} diff --git a/util/base58/doc.go b/util/base58/doc.go deleted file mode 100644 index 0d61593a9..000000000 --- a/util/base58/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package base58 provides an API for working with modified base58 and Base58Check -encodings. - -Modified Base58 Encoding - -Standard base58 encoding is similar to standard base64 encoding except, as the -name implies, it uses a 58 character alphabet which results in an alphanumeric -string and allows some characters which are problematic for humans to be -excluded. Due to this, there can be various base58 alphabets. - -The modified base58 alphabet used by kaspa, and hence this package, omits the -0, O, I, and l characters that look the same in many fonts and are therefore -hard to humans to distinguish. - -At the time of this writing, the Base58 encoding scheme is primarily used -for kaspa private keys. -*/ -package base58 diff --git a/util/base58/example_test.go b/util/base58/example_test.go deleted file mode 100644 index 26badb89e..000000000 --- a/util/base58/example_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package base58_test - -import ( - "fmt" - "github.com/kaspanet/kaspad/util/base58" -) - -// This example demonstrates how to decode modified base58 encoded data. -func ExampleDecode() { - // Decode example modified base58 encoded data. - encoded := "25JnwSn7XKfNQ" - decoded := base58.Decode(encoded) - - // Show the decoded data. - fmt.Println("Decoded Data:", string(decoded)) - - // Output: - // Decoded Data: Test data -} - -// This example demonstrates how to encode data using the modified base58 -// encoding scheme. -func ExampleEncode() { - // Encode example data with the modified base58 encoding scheme. - data := []byte("Test data") - encoded := base58.Encode(data) - - // Show the encoded data. - fmt.Println("Encoded Data:", encoded) - - // Output: - // Encoded Data: 25JnwSn7XKfNQ -} - -// This example demonstrates how to decode Base58Check encoded data. -func ExampleCheckDecode() { - // Decode an example Base58Check encoded data. - encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" - decoded, version, err := base58.CheckDecode(encoded) - if err != nil { - fmt.Println(err) - return - } - - // Show the decoded data. - fmt.Printf("Decoded data: %x\n", decoded) - fmt.Println("Version Byte:", version) - - // Output: - // Decoded data: 62e907b15cbf27d5425399ebf6f0fb50ebb88f18 - // Version Byte: 0 -} - -// This example demonstrates how to encode data using the Base58Check encoding -// scheme. -func ExampleCheckEncode() { - // Encode example data with the Base58Check encoding scheme. - data := []byte("Test data") - encoded := base58.CheckEncode(data, 0) - - // Show the encoded data. - fmt.Println("Encoded Data:", encoded) - - // Output: - // Encoded Data: 182iP79GRURMp7oMHDU -} diff --git a/util/base58/genalphabet.go b/util/base58/genalphabet.go deleted file mode 100644 index e8aa8bf8b..000000000 --- a/util/base58/genalphabet.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -//+build ignore - -package base58 - -import ( - "bytes" - "io" - "log" - "os" - "strconv" -) - -var ( - start = []byte(`// Copyright (c) 2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -// AUTOGENERATED by genalphabet.go; do not edit. - -package base58 - -const ( - // alphabet is the modified base58 alphabet used by kaspa. - alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" - - alphabetIdx0 = '1' -) - -var b58 = [256]byte{`) - - end = []byte(`}`) - - alphabet = []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") - tab = []byte("\t") - invalid = []byte("255") - comma = []byte(",") - space = []byte(" ") - nl = []byte("\n") -) - -func write(w io.Writer, b []byte) { - _, err := w.Write(b) - if err != nil { - log.Fatal(err) - } -} - -func main() { - fi, err := os.Create("alphabet.go") - if err != nil { - log.Fatal(err) - } - defer fi.Close() - - write(fi, start) - write(fi, nl) - for i := byte(0); i < 32; i++ { - write(fi, tab) - for j := byte(0); j < 8; j++ { - idx := bytes.IndexByte(alphabet, i*8+j) - if idx == -1 { - write(fi, invalid) - } else { - write(fi, strconv.AppendInt(nil, int64(idx), 10)) - } - write(fi, comma) - if j != 7 { - write(fi, space) - } - } - write(fi, nl) - } - write(fi, end) - write(fi, nl) -} diff --git a/util/bloom/filter_test.go b/util/bloom/filter_test.go index 350e2c084..6e8f4e43f 100644 --- a/util/bloom/filter_test.go +++ b/util/bloom/filter_test.go @@ -212,18 +212,10 @@ func TestFilterInsertWithTweak(t *testing.T) { // TestFilterInsertKey ensures inserting public keys and addresses works as // expected. func TestFilterInsertKey(t *testing.T) { - secret := "5Kg1gnAjaLfKiwhhPpGS3QfRg2m6awQvaj98JCZBZQ5SuS2F15C" - - wif, err := util.DecodeWIF(secret) - if err != nil { - t.Errorf("TestFilterInsertKey DecodeWIF failed: %v", err) - return - } - f := bloom.NewFilter(2, 0, 0.001, wire.BloomUpdateAll) - serializedPubKey, err := wif.SerializePubKey() + serializedPubKey, err := hex.DecodeString("045b81f0017e2091e2edcd5eecf10d5bdd120a5514cb3ee65b8447ec18bfc4575c6d5bf415e54e03b1067934a0f0ba76b01c6b9ab227142ee1d543764b69d901e0") if err != nil { - t.Errorf("TestFilterInsertKey SerializePubKey failed: %v", err) + t.Errorf("TestFilterInsertKey DecodeString failed: %v", err) return } f.Add(serializedPubKey) diff --git a/util/wif.go b/util/wif.go deleted file mode 100644 index f199d5f53..000000000 --- a/util/wif.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "github.com/kaspanet/go-secp256k1" - "github.com/kaspanet/kaspad/util/base58" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/pkg/errors" -) - -// ErrMalformedPrivateKey describes an error where a WIF-encoded private -// key cannot be decoded due to being improperly formatted. This may occur -// if the byte length is incorrect or an unexpected magic number was -// encountered. -var ErrMalformedPrivateKey = errors.New("malformed private key") - -// compressMagic is the magic byte used to identify a WIF encoding for -// an address created from a compressed serialized public key. -const compressMagic byte = 0x01 - -// WIF contains the individual components described by the Wallet Import Format -// (WIF). 
A WIF string is typically used to represent a private key and its -// associated address in a way that may be easily copied and imported into or -// exported from wallet software. WIF strings may be decoded into this -// structure by calling DecodeWIF or created with a user-provided private key -// by calling NewWIF. -type WIF struct { - // PrivKey is the private key being imported or exported. - PrivKey *secp256k1.PrivateKey - - // CompressPubKey specifies whether the address controlled by the - // imported or exported private key was created by hashing a - // compressed (33-byte) serialized public key, rather than an - // uncompressed (65-byte) one. - CompressPubKey bool - - // netID is the kaspa network identifier byte used when - // WIF encoding the private key. - netID byte -} - -// NewWIF creates a new WIF structure to export an address and its private key -// as a string encoded in the Wallet Import Format. The compress argument -// specifies whether the address intended to be imported or exported was created -// by serializing the public key compressed rather than uncompressed. -func NewWIF(privKey *secp256k1.PrivateKey, privateKeyID byte, compress bool) (*WIF, error) { - return &WIF{privKey, compress, privateKeyID}, nil -} - -// IsForNet returns whether or not the decoded WIF structure is associated -// with the passed kaspa network. -func (w *WIF) IsForNet(privateKeyID byte) bool { - return w.netID == privateKeyID -} - -// DecodeWIF creates a new WIF structure by decoding the string encoding of -// the import format. -// -// The WIF string must be a base58-encoded string of the following byte -// sequence: -// -// * 1 byte to identify the network, must be 0x80 for mainnet or 0xef for -// either testnet or the regression test network -// * 32 bytes of a binary-encoded, big-endian, zero-padded private key -// * Optional 1 byte (equal to 0x01) if the address being imported or exported -// was created by taking the RIPEMD160 after SHA256 hash of a serialized -// compressed (33-byte) public key -// * 4 bytes of checksum, must equal the first four bytes of the double SHA256 -// of every byte before the checksum in this sequence -// -// If the base58-decoded byte sequence does not match this, DecodeWIF will -// return a non-nil error. ErrMalformedPrivateKey is returned when the WIF -// is of an impossible length or the expected compressed pubkey magic number -// does not equal the expected value of 0x01. ErrChecksumMismatch is returned -// if the expected WIF checksum does not match the calculated checksum. -func DecodeWIF(wif string) (*WIF, error) { - decoded := base58.Decode(wif) - decodedLen := len(decoded) - var compress bool - - // Length of base58 decoded WIF must be 32 bytes + an optional 1 byte - // (0x01) if compressed, plus 1 byte for netID + 4 bytes of checksum. - switch decodedLen { - case 1 + secp256k1.SerializedPrivateKeySize + 1 + 4: - if decoded[33] != compressMagic { - return nil, ErrMalformedPrivateKey - } - compress = true - case 1 + secp256k1.SerializedPrivateKeySize + 4: - compress = false - default: - return nil, ErrMalformedPrivateKey - } - - // Checksum is first four bytes of double SHA256 of the identifier byte - // and privKey. Verify this matches the final 4 bytes of the decoded - // private key. 
- var tosum []byte - if compress { - tosum = decoded[:1+secp256k1.SerializedPrivateKeySize+1] - } else { - tosum = decoded[:1+secp256k1.SerializedPrivateKeySize] - } - cksum := daghash.DoubleHashB(tosum)[:4] - if !bytes.Equal(cksum, decoded[decodedLen-4:]) { - return nil, ErrChecksumMismatch - } - - netID := decoded[0] - privKeyBytes := decoded[1 : 1+secp256k1.SerializedPrivateKeySize] - privKey, err := secp256k1.DeserializePrivateKeyFromSlice(privKeyBytes) - if err != nil { - return nil, err - } - return &WIF{privKey, compress, netID}, nil -} - -// String creates the Wallet Import Format string encoding of a WIF structure. -// See DecodeWIF for a detailed breakdown of the format and requirements of -// a valid WIF string. -func (w *WIF) String() string { - // Precalculate size. Maximum number of bytes before base58 encoding - // is one byte for the network, 32 bytes of private key, possibly one - // extra byte if the pubkey is to be compressed, and finally four - // bytes of checksum. - encodeLen := 1 + secp256k1.SerializedPrivateKeySize + 4 - if w.CompressPubKey { - encodeLen++ - } - - a := make([]byte, 0, encodeLen) - a = append(a, w.netID) - // Pad and append bytes manually, instead of using Serialize, to - // avoid another call to make. - a = paddedAppend(secp256k1.SerializedPrivateKeySize, a, w.PrivKey.Serialize()[:]) - if w.CompressPubKey { - a = append(a, compressMagic) - } - cksum := daghash.DoubleHashB(a)[:4] - a = append(a, cksum...) - return base58.Encode(a) -} - -// SerializePubKey serializes the associated public key of the imported or -// exported private key in either a compressed or uncompressed format. The -// serialization format chosen depends on the value of w.CompressPubKey. -func (w *WIF) SerializePubKey() ([]byte, error) { - pk, err := w.PrivKey.SchnorrPublicKey() - if err != nil { - return nil, err - } - if w.CompressPubKey { - return pk.SerializeCompressed() - } - return pk.SerializeUncompressed() -} - -// paddedAppend appends the src byte slice to dst, returning the new slice. -// If the length of the source is smaller than the passed size, leading zero -// bytes are appended to the dst slice before appending src. -func paddedAppend(size uint, dst, src []byte) []byte { - for i := 0; i < int(size)-len(src); i++ { - dst = append(dst, 0) - } - return append(dst, src...) -} diff --git a/util/wif_test.go b/util/wif_test.go deleted file mode 100644 index 17396572d..000000000 --- a/util/wif_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2013, 2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package util_test - -import ( - "testing" - - "github.com/kaspanet/go-secp256k1" - "github.com/kaspanet/kaspad/dagconfig" - . 
"github.com/kaspanet/kaspad/util" -) - -func TestEncodeDecodeWIF(t *testing.T) { - priv1, err := secp256k1.DeserializePrivateKey(&secp256k1.SerializedPrivateKey{ - 0x0c, 0x28, 0xfc, 0xa3, 0x86, 0xc7, 0xa2, 0x27, - 0x60, 0x0b, 0x2f, 0xe5, 0x0b, 0x7c, 0xae, 0x11, - 0xec, 0x86, 0xd3, 0xbf, 0x1f, 0xbe, 0x47, 0x1b, - 0xe8, 0x98, 0x27, 0xe1, 0x9d, 0x72, 0xaa, 0x1d}) - - if err != nil { - t.Fatal(err) - } - - priv2, err := secp256k1.DeserializePrivateKey(&secp256k1.SerializedPrivateKey{ - 0xdd, 0xa3, 0x5a, 0x14, 0x88, 0xfb, 0x97, 0xb6, - 0xeb, 0x3f, 0xe6, 0xe9, 0xef, 0x2a, 0x25, 0x81, - 0x4e, 0x39, 0x6f, 0xb5, 0xdc, 0x29, 0x5f, 0xe9, - 0x94, 0xb9, 0x67, 0x89, 0xb2, 0x1a, 0x03, 0x98}) - - if err != nil { - t.Fatal(err) - } - - wif1, err := NewWIF(priv1, dagconfig.MainnetParams.PrivateKeyID, false) - if err != nil { - t.Fatal(err) - } - wif2, err := NewWIF(priv2, dagconfig.TestnetParams.PrivateKeyID, true) - if err != nil { - t.Fatal(err) - } - - tests := []struct { - wif *WIF - encoded string - }{ - { - wif1, - "5HueCGU8rMjxEXxiPuD5BDku4MkFqeZyd4dZ1jvhTVqvbTLvyTJ", - }, - { - wif2, - "cV1Y7ARUr9Yx7BR55nTdnR7ZXNJphZtCCMBTEZBJe1hXt2kB684q", - }, - } - - for _, test := range tests { - // Test that encoding the WIF structure matches the expected string. - s := test.wif.String() - if s != test.encoded { - t.Errorf("TestEncodeDecodePrivateKey failed: want '%s', got '%s'", - test.encoded, s) - continue - } - - // Test that decoding the expected string results in the original WIF - // structure. - w, err := DecodeWIF(test.encoded) - if err != nil { - t.Error(err) - continue - } - if got := w.String(); got != test.encoded { - t.Errorf("NewWIF failed: want '%v', got '%v'", test.wif, got) - } - } -} From 672f02490a61a910bbff46965b64e5356b3f6571 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 28 May 2020 09:55:59 +0300 Subject: [PATCH 40/77] [NOD-763] Change genesis version (#737) --- blockdag/dag_test.go | 2 +- blockdag/ghostdag_test.go | 4 +- blockdag/testdata/blk_0_to_4.dat | Bin 2055 -> 2055 bytes blockdag/testdata/blk_3A.dat | Bin 467 -> 467 bytes blockdag/testdata/blk_3B.dat | Bin 354 -> 354 bytes blockdag/testdata/blk_3C.dat | Bin 382 -> 382 bytes blockdag/testdata/blk_3D.dat | Bin 508 -> 508 bytes blockdag/virtualblock_test.go | 2 +- dagconfig/genesis.go | 70 +++++----- dagconfig/genesis_test.go | 219 +++++++++++-------------------- 10 files changed, 113 insertions(+), 184 deletions(-) diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index 3f73fa8a7..a56d21122 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -207,7 +207,7 @@ func TestIsKnownBlock(t *testing.T) { {hash: dagconfig.SimnetParams.GenesisHash.String(), want: true}, // Block 3b should be present (as a second child of Block 2). - {hash: "48a752afbe36ad66357f751f8dee4f75665d24e18f644d83a3409b398405b46b", want: true}, + {hash: "2a697c985ab868ea95d84e6dcd7e88301296679149e73bca46eef2d0f2995944", want: true}, // Block 100000 should be present (as an orphan). 
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true}, diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index be0c1b563..28260417e 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) { }{ { k: 3, - expectedReds: []string{"F", "G", "H", "I", "N", "O"}, + expectedReds: []string{"F", "G", "H", "I", "O", "P"}, dagData: []*testBlockData{ { parents: []string{"A"}, @@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) { id: "T", expectedScore: 13, expectedSelectedParent: "S", - expectedBlues: []string{"S", "P", "Q"}, + expectedBlues: []string{"S", "Q", "N"}, }, }, }, diff --git a/blockdag/testdata/blk_0_to_4.dat b/blockdag/testdata/blk_0_to_4.dat index d7d516eba16f02ca09dadb513db0fd51c4ec50db..1b4097f24c097d971264df342bd1e2d7d2fb1059 100644 GIT binary patch delta 876 zcmZn{Xcyp-{eH(*g^>XS1Saw%@-B@&7YE|}|6i}bFtKXCgLZXeLS;!amvLc-Zh9SC zbwvAO?YGNQcM40N^IRJC#N{iKIMaWo*QQ=AtMypL&*?PwOy*x&t<-u_B}<$6k~G8Q zN=65(Wyq$2j0XZnkgbe_jGz3QEoibUQ&_zu*H=S@?YZH%d6s@$Y<9Kl<+Fob+Zk_k zB)z(GD&*}$dB@y8H?5-jo|hCRK2+r~W;yP5ODSXf$xB6&wQKY?KVBJQ`TX62#Sa|x zEjp8)_IxV)_@?~1-nHp_~cE!RhqDW+48cy zTK^A$`qpojBl!UmU8nReA0va zBA7g9ezeNqbLPrFes43=hGw0*s{V&pRqkGDFJol-wzQ#8C~{l-e}~71Y?rUOw$5{# zLgOrMcMnCogJ#DKLJcnS-n*%0zXHkE;AjI95ML8=8rauWtbPhBK1;V-@3?0F?xgoQ z^We{nI~Hq}HkkkX^?Zvk|DhvxZ!vv+No3o7p;uWy%)EBI(0K2AwIyip&Fevnakb1gIV4l58yjE?FOUO`^qi+^MAR&boj!uWz$zx(d!Gh zi(OT@KJi%1L=7RII^K8c&H|qr5468aNnJO!joD?F)$-JRTzkZ~3sz*#StM@3`sYu< zx27_imC@%We`FC?Amk9B9~USTsYWHn`W&~v{UBqS`mTUqdwJfmJqVh1B-HI7!;|;M IXF>i00MJQ-h5!Hn delta 876 zcmZn{Xcyp-{eH(*g^_^)h!`gFB=X8!7L5Zk8UFvTS74r4wcp`?<{4MllIWjT{hY)6 zrSBVZZZBJLtacgp|VvhpFr zXVr_7D;XWEWRXk-*#rWNAO<5L<0t=S3!3c86jr|@tC{PQgzgR9?JZBuq6}h#tirmN z?+f2nDyV4c8*s$&deq}Kwgo>soB4hOo+*==le@z-g@!#qM{!1-f+w!^37I#Ir*PVMC^0#B>!T)OlGdex%`iq@k zTDqR+u{0H4yxJq&Zi!y2pyZGH-(JmjRg}l+YeG%~`?`wNPeGyg{(auMrVE)pCMwJy ze)b!GG~(ux*%Pi4n|&ax_y1{3Uw^$L((z%%K36f#OJUpVE-%P9V!lkrM5E|&QuaoR z#oLrtEZNqu;RAEfR*Q_8CAB|f)Z;~WEwSEQ(=(-2J5lGVvgu{tNqH~bxBN~$zQL90 zl7(68Cehsj;*N}Ord0@92=8xulFBDjU-fDGrRmRW=S`?ycR{fE+fXV;> diff --git a/blockdag/testdata/blk_3A.dat b/blockdag/testdata/blk_3A.dat index 0251c8fa23535176126a31a2aa35cdbed9f5e32d..1d4fbec3f9903e406e2377cdbdc3adbe7ea45546 100644 GIT binary patch delta 254 zcmcc2e3`jk_WK>%(~JxtAi(%jPA|^cRZGL=Mc-Bhf058>lUTF(V~m3K#x2R8AaTD^ zvHq>Ut*djetNZoP7!K!pEFnf@q3$@HZ<$hRrNo- zs&e;Idl@6sx1|k*LXq3r|2sTBWV?LLwRN7`6dGr7yZb8I9W*;`5NdFl_ufr4`xVjW z;y_OM|G!>=3B&*bCXnk0I_*CYFih@aG@ST1OJT)l>2~WK*X-Y&^gd@E{F!mbV$IS9 U^Pj(-ZxQA{bj0rM%(~JxtAi$`j+spNT*%3dJ4-&h7Grd{g5U6{n==>p@+eVzd|FV)< zT|B*iyf9W|a(sBZuU6!7tGwTXio5q+Jx|B^WN6m>5xKQ_|C%(0;=k1i{Fhp|w&io5 zE$)hLuRHfPY5#$MVR9d%;l#gL3JSgV@AK9*UC8V)QDOe@v)}lm5jU62 To^YMm>;qxF|4&bDXY>I8@k4NN diff --git a/blockdag/testdata/blk_3B.dat b/blockdag/testdata/blk_3B.dat index 43ecec766ed2aa65fde28731c1ab86f99fca6e28..8adc090c98fe2f36afebb739e6722ef1c6f5d913 100644 GIT binary patch delta 189 zcmaFF^oXfm_WK>%C`JYl5McZ%rx)k!s-@xbqHn8$zewn`Nvv7?F-Adq%C`JYl5Mb2N?dAHv?1-Pq2Z`Okncl2#2-Lk(bpDXdZ6nU!e_6?_ z73X3zo)k|oyu9(M&D7gfC)U*7n0+DPqc-Ou7H^fd*T%Ot?_ZO~Q2e(#f&Wqq*S37_ zv&CJ}?RDqghWzc=dGP<5z>H4My8dG4mzJ*Qc`QwZ7q1o!w_BpuDk%Bm{>!+lWy=ZteYJE?W;|4X9hrUH||90RR7gAO(@SSRknL8;3i%)<5paOU^e_^8vV{DRqE1`TFOy6A!|~ JKJAk-0Za>kU%CJQ delta 207 zcmV;=05Jdl0{#Mj9rxTlb^!nY0000G0xT_!3je0WPBicsyZZv{uYgf4+;Y#tJliq} zjs9w51-NR13i22&*e$n&JO|4Ik4LUH||90RR7gAOMlNSRf#c-`@>>g3xJ(r|gvw^hMkBy`#&)-3)QqoBQUOY$d3+^_8Ue`(!> z$1;)2mGX7J7zQ1z4qNo3n8j{N(#;xy-<+N~x2Y1V1i^fk**ez#gLdr9kO 
zkG_L?#T%aVp08kQ@DMAa&&7eH|NpO7fVhc)ff>et=tJe9nic!)!J{(a$je+!rL29L z^Up9h2D`d`jrn5dIKw{HXG$0YNHgP#&(iJIJFeNkJL!GSJoq!?j>Vd#4dy?8J>MeC zf9QzaTacMdjDi25fRW)7s}@M3_)3vm;;t{ZdhX@^B^5Ta@Oa%WgR@cI?I88I07xGb Pi53#092i0nK#}VJP7`a{XU+#Lwh|#O~irZ`L;i>fR|jf5_&x5ohnetYlV2R#jCu ztIvV#pVqv)W4-dy*^kp^HNEd!{zP;0O7E3P?ypW;@I^6x`4it)t{vwmv)&+L^^Hpp z4&`Srn7X2h#dbQ_8a#yjWzjg0^#A|$3NU5N5XZr|s63b`gpU38;8B@yCFelGzik6PtY? ztoQ$EkXcNOf&Za^k>L}o7D%J`N|9UQt}nNG?&ba^6*jZ*c-=08vr*pdAoaKaNFNi4 O780Wz7(x(0k?R2E1*Gc$ diff --git a/blockdag/virtualblock_test.go b/blockdag/virtualblock_test.go index 1ee543c6f..fbe414b1d 100644 --- a/blockdag/virtualblock_test.go +++ b/blockdag/virtualblock_test.go @@ -97,7 +97,7 @@ func TestVirtualBlock(t *testing.T) { tipsToSet: []*blockNode{}, tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6}, expectedTips: blockSetFromSlice(node2, node5, node6), - expectedSelectedParent: node6, + expectedSelectedParent: node5, }, } diff --git a/dagconfig/genesis.go b/dagconfig/genesis.go index 2aa2ddbc8..28f3b51e1 100644 --- a/dagconfig/genesis.go +++ b/dagconfig/genesis.go @@ -42,10 +42,10 @@ var genesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, genesisTxIns, genesisTxOuts, // genesisHash is the hash of the first block in the block DAG for the main // network (genesis block). var genesisHash = daghash.Hash{ - 0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50, - 0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33, - 0x38, 0x7e, 0xd1, 0xd5, 0x8c, 0x42, 0x59, 0x1a, - 0x31, 0xac, 0x9a, 0xa6, 0x2e, 0xd5, 0x2b, 0x0f, + 0x9c, 0xf9, 0x7d, 0xd6, 0xbc, 0x25, 0xb2, 0xb8, + 0x6c, 0xd0, 0xe1, 0x9e, 0x3a, 0x2f, 0xab, 0x3d, + 0x3e, 0x3f, 0x4d, 0x95, 0x09, 0x85, 0x8f, 0x99, + 0xc8, 0xe4, 0xc2, 0x15, 0x78, 0xac, 0x79, 0x6a, } // genesisMerkleRoot is the hash of the first transaction in the genesis block @@ -61,14 +61,14 @@ var genesisMerkleRoot = daghash.Hash{ // public transaction ledger for the main network. var genesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, + Version: 0x10000000, ParentHashes: []*daghash.Hash{}, HashMerkleRoot: &genesisMerkleRoot, AcceptedIDMerkleRoot: &daghash.Hash{}, UTXOCommitment: &daghash.ZeroHash, - Timestamp: time.Unix(0x5cdac4b0, 0), + Timestamp: time.Unix(0x5ece5ba4, 0), Bits: 0x207fffff, - Nonce: 0x1, + Nonce: 0, }, Transactions: []*wire.MsgTx{genesisCoinbaseTx}, } @@ -103,10 +103,10 @@ var devnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, devnetGenesisTxIns, dev // devGenesisHash is the hash of the first block in the block DAG for the development // network (genesis block). var devnetGenesisHash = daghash.Hash{ - 0x17, 0x59, 0x5c, 0x09, 0xdd, 0x1a, 0x51, 0x65, - 0x14, 0xbc, 0x19, 0xff, 0x29, 0xea, 0xf3, 0xcb, - 0xe2, 0x76, 0xf0, 0xc7, 0x86, 0xf8, 0x0c, 0x53, - 0x59, 0xbe, 0xee, 0x0c, 0x2b, 0x5d, 0x00, 0x00, + 0xd3, 0xc0, 0xf4, 0xa7, 0x91, 0xa2, 0x2e, 0x27, + 0x90, 0x38, 0x6d, 0x47, 0x7b, 0x26, 0x15, 0xaf, + 0xaf, 0xa6, 0x3a, 0xad, 0xd5, 0xfa, 0x37, 0xf3, + 0x5e, 0x70, 0xfb, 0xfc, 0x07, 0x31, 0x00, 0x00, } // devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block @@ -122,14 +122,14 @@ var devnetGenesisMerkleRoot = daghash.Hash{ // public transaction ledger for the development network. 
var devnetGenesisBlock = wire.MsgBlock{
 Header: wire.BlockHeader{
- Version: 1,
+ Version: 0x10000000,
 ParentHashes: []*daghash.Hash{},
 HashMerkleRoot: &devnetGenesisMerkleRoot,
 AcceptedIDMerkleRoot: &daghash.Hash{},
 UTXOCommitment: &daghash.ZeroHash,
- Timestamp: time.Unix(0x5e15e758, 0),
+ Timestamp: time.Unix(0x5ece5ba4, 0),
 Bits: 0x1e7fffff,
- Nonce: 0x282ac,
+ Nonce: 0x227e6,
 },
 Transactions: []*wire.MsgTx{devnetGenesisCoinbaseTx},
 }
@@ -164,10 +164,10 @@ var regtestGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, regtestGenesisTxIns, r
 // devGenesisHash is the hash of the first block in the block DAG for the development
 // network (genesis block).
 var regtestGenesisHash = daghash.Hash{
- 0xfc, 0x02, 0x19, 0x6f, 0x79, 0x7a, 0xed, 0x2d,
- 0x0f, 0x31, 0xa5, 0xbd, 0x32, 0x13, 0x29, 0xc7,
- 0x7c, 0x0c, 0x5c, 0x1a, 0x5b, 0x7c, 0x20, 0x68,
- 0xb7, 0xc9, 0x9f, 0x61, 0x13, 0x11, 0x00, 0x00,
+ 0xc7, 0x7f, 0x3f, 0xb1, 0xe8, 0xf8, 0xcf, 0xa4,
+ 0xf5, 0x6e, 0xeb, 0x9a, 0x35, 0xd4, 0x58, 0x10,
+ 0xc8, 0xd6, 0x6d, 0x07, 0x76, 0x53, 0x75, 0xa2,
+ 0x73, 0xc0, 0x4e, 0xeb, 0xed, 0x61, 0x00, 0x00,
 }
 
 // regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -183,14 +183,14 @@ var regtestGenesisMerkleRoot = daghash.Hash{
 // public transaction ledger for the development network.
 var regtestGenesisBlock = wire.MsgBlock{
 Header: wire.BlockHeader{
- Version: 1,
+ Version: 0x10000000,
 ParentHashes: []*daghash.Hash{},
 HashMerkleRoot: &regtestGenesisMerkleRoot,
 AcceptedIDMerkleRoot: &daghash.Hash{},
 UTXOCommitment: &daghash.ZeroHash,
- Timestamp: time.Unix(0x5e15e2d8, 0),
+ Timestamp: time.Unix(0x5ece5ba4, 0),
 Bits: 0x1e7fffff,
- Nonce: 0x15a6,
+ Nonce: 0x31516,
 },
 Transactions: []*wire.MsgTx{regtestGenesisCoinbaseTx},
 }
@@ -224,10 +224,10 @@ var simnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, simnetGenesisTxIns, sim
 // simnetGenesisHash is the hash of the first block in the block DAG for
 // the simnet (genesis block).
 var simnetGenesisHash = daghash.Hash{
- 0xff, 0x69, 0xcc, 0x45, 0x45, 0x74, 0x5b, 0xf9,
- 0xd5, 0x4e, 0x43, 0x56, 0x4f, 0x1b, 0xdf, 0x31,
- 0x09, 0xb7, 0x76, 0xaa, 0x2a, 0x33, 0x35, 0xc9,
- 0xa1, 0x80, 0xe0, 0x92, 0xbb, 0xae, 0xcd, 0x49,
+ 0x2b, 0x7b, 0x81, 0x60, 0x79, 0x74, 0x83, 0x0a,
+ 0x33, 0x71, 0x88, 0x2d, 0x67, 0x7e, 0x06, 0x7b,
+ 0x58, 0x87, 0xa3, 0x2b, 0xed, 0xa7, 0x65, 0xb9,
+ 0x13, 0x1b, 0xce, 0x49, 0xa5, 0x56, 0xe4, 0x44,
 }
 
 // simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -243,14 +243,14 @@ var simnetGenesisMerkleRoot = daghash.Hash{
 // public transaction ledger for the development network.
 var simnetGenesisBlock = wire.MsgBlock{
 Header: wire.BlockHeader{
- Version: 1,
+ Version: 0x10000000,
 ParentHashes: []*daghash.Hash{},
 HashMerkleRoot: &simnetGenesisMerkleRoot,
 AcceptedIDMerkleRoot: &daghash.Hash{},
 UTXOCommitment: &daghash.ZeroHash,
- Timestamp: time.Unix(0x5e15d31c, 0),
+ Timestamp: time.Unix(0x5ece5ba5, 0),
 Bits: 0x207fffff,
- Nonce: 0x3,
+ Nonce: 0x0,
 },
 Transactions: []*wire.MsgTx{simnetGenesisCoinbaseTx},
 }
@@ -282,10 +282,10 @@ var testnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, testnetGenesisTxIns, t
 // testnetGenesisHash is the hash of the first block in the block DAG for the test
 // network (genesis block).
var testnetGenesisHash = daghash.Hash{ - 0x22, 0x15, 0x34, 0xa9, 0xff, 0x10, 0xdd, 0x47, - 0xcd, 0x21, 0x11, 0x25, 0xc5, 0x6d, 0x85, 0x9a, - 0x97, 0xc8, 0x63, 0x63, 0x79, 0x40, 0x80, 0x04, - 0x74, 0xe6, 0x29, 0x7b, 0xbc, 0x08, 0x00, 0x00, + 0x6b, 0xac, 0xe2, 0xfc, 0x1d, 0x1c, 0xaf, 0x38, + 0x72, 0x0b, 0x9d, 0xf5, 0xcc, 0x2b, 0xf4, 0x6d, + 0xf4, 0x2c, 0x05, 0xf9, 0x3d, 0x94, 0xb1, 0xc6, + 0x6a, 0xea, 0x1b, 0x81, 0x4c, 0x22, 0x00, 0x00, } // testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block @@ -301,14 +301,14 @@ var testnetGenesisMerkleRoot = daghash.Hash{ // public transaction ledger for testnet. var testnetGenesisBlock = wire.MsgBlock{ Header: wire.BlockHeader{ - Version: 1, + Version: 0x10000000, ParentHashes: []*daghash.Hash{}, HashMerkleRoot: &testnetGenesisMerkleRoot, AcceptedIDMerkleRoot: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash, - Timestamp: time.Unix(0x5e15adfe, 0), + Timestamp: time.Unix(0x5ece5ba4, 0), Bits: 0x1e7fffff, - Nonce: 0x20a1, + Nonce: 0x6d249, }, Transactions: []*wire.MsgTx{testnetGenesisCoinbaseTx}, } diff --git a/dagconfig/genesis_test.go b/dagconfig/genesis_test.go index 99e827a7d..2f810e86a 100644 --- a/dagconfig/genesis_test.go +++ b/dagconfig/genesis_test.go @@ -148,14 +148,14 @@ func TestDevnetGenesisBlock(t *testing.T) { // genesisBlockBytes are the wire encoded bytes for the genesis block of the // main network as of protocol version 1. var genesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x72, 0x10, 0x35, 0x85, 0xdd, 0xac, 0x82, 0x5c, 0x49, 0x13, 0x9f, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x72, 0x10, 0x35, 0x85, 0xdd, 0xac, 0x82, 0x5c, 0x49, 0x13, 0x9f, 0xc0, 0x0e, 0x37, 0xc0, 0x45, 0x71, 0xdf, 0xd9, 0xf6, 0x36, 0xdf, 0x4c, 0x42, 0x72, 0x7b, 0x9e, 0x86, 0xdd, 0x37, 0xd2, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xc4, 0xda, 0x5c, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5b, 0xce, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, @@ -171,164 +171,93 @@ var genesisBlockBytes = []byte{ // regtestGenesisBlockBytes are the wire encoded bytes for the genesis block of // the regression test network as of protocol version 1. 
var regtestGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x9f, 0x62, - 0xc9, 0x2b, 0x16, 0x17, 0xb3, 0x41, 0x6d, 0x9e, - 0x2d, 0x87, 0x93, 0xfd, 0x72, 0x77, 0x4d, 0x1d, - 0x6f, 0x6d, 0x38, 0x5b, 0xf1, 0x24, 0x1b, 0xdc, - 0x96, 0xce, 0xbf, 0xa1, 0x09, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0xe2, 0x15, - 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x1e, 0xa6, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, - 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, - 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, - 0x32, 0xec, 0xb4, 0xf8, 0x3c, 0x7a, 0x32, 0x0f, - 0xd2, 0xe5, 0x24, 0x77, 0x89, 0x43, 0x3a, 0x78, - 0x0a, 0xda, 0x68, 0x2d, 0xf6, 0xaa, 0xb1, 0x19, - 0xdd, 0xd8, 0x97, 0x15, 0x4b, 0xcb, 0x42, 0x25, - 0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, - 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, - 0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, - 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x3a, 0x9f, 0x62, 0xc9, 0x2b, 0x16, 0x17, 0xb3, 0x41, 0x6d, 0x9e, + 0x2d, 0x87, 0x93, 0xfd, 0x72, 0x77, 0x4d, 0x1d, 0x6f, 0x6d, 0x38, 0x5b, 0xf1, 0x24, 0x1b, 0xdc, + 0x96, 0xce, 0xbf, 0xa1, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5b, 0xce, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x1e, 0x16, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, + 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, + 0x32, 0xec, 0xb4, 0xf8, 0x3c, 0x7a, 0x32, 0x0f, 0xd2, 0xe5, 0x24, 0x77, 0x89, 0x43, 0x3a, 0x78, + 0x0a, 0xda, 0x68, 0x2d, 0xf6, 0xaa, 0xb1, 0x19, 0xdd, 0xd8, 0x97, 0x15, 0x4b, 0xcb, 0x42, 0x25, + 0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, + 0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, 
0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x72, 0x65, 0x67, 0x74, 0x65, 0x73, 0x74, } // testnetGenesisBlockBytes are the wire encoded bytes for the genesis block of // the test network as of protocol version 1. var testnetGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x88, 0x05, 0xd0, - 0xe7, 0x8f, 0x41, 0x77, 0x39, 0x2c, 0xb6, 0xbb, - 0xb4, 0x19, 0xa8, 0x48, 0x4a, 0xdf, 0x77, 0xb0, - 0x82, 0xd6, 0x70, 0xd8, 0x24, 0x6a, 0x36, 0x05, - 0xaa, 0xbd, 0x7a, 0xd1, 0x62, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xad, 0x15, - 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x1e, 0xa1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, - 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, - 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, - 0x72, 0xe6, 0x7e, 0x37, 0xa1, 0x34, 0x89, 0x23, - 0x24, 0xaf, 0xae, 0x99, 0x1f, 0x89, 0x09, 0x41, - 0x1a, 0x4d, 0x58, 0xfe, 0x5a, 0x04, 0xb0, 0x3e, - 0xeb, 0x1b, 0x5b, 0xb8, 0x65, 0xa8, 0x65, 0x0f, - 0x01, 0x00, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, - 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x88, 0x05, 0xd0, 0xe7, 0x8f, 0x41, 0x77, 0x39, 0x2c, 0xb6, 0xbb, + 0xb4, 0x19, 0xa8, 0x48, 0x4a, 0xdf, 0x77, 0xb0, 0x82, 0xd6, 0x70, 0xd8, 0x24, 0x6a, 0x36, 0x05, + 0xaa, 0xbd, 0x7a, 0xd1, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5b, 0xce, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x1e, 0x49, 0xd2, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, + 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, + 0x72, 0xe6, 0x7e, 0x37, 0xa1, 0x34, 0x89, 0x23, 0x24, 0xaf, 0xae, 0x99, 0x1f, 0x89, 0x09, 0x41, + 0x1a, 0x4d, 0x58, 0xfe, 0x5a, 0x04, 0xb0, 0x3e, 0xeb, 0x1b, 0x5b, 0xb8, 0x65, 0xa8, 0x65, 0x0f, + 0x01, 0x00, 0x6b, 0x61, 0x73, 0x70, 0x61, 
0x2d, 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74, } // simnetGenesisBlockBytes are the wire encoded bytes for the genesis block of // the simulation test network as of protocol version 1. var simnetGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x1c, 0x3b, - 0x9e, 0x0d, 0x9a, 0xc0, 0x80, 0x0a, 0x08, 0x42, - 0x50, 0x02, 0xa3, 0xea, 0xdb, 0xed, 0xc8, 0xd0, - 0xad, 0x35, 0x03, 0xd8, 0x0e, 0x11, 0x3c, 0x7b, - 0xb2, 0xb5, 0x20, 0xe5, 0x84, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd3, 0x15, - 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x20, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, - 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, - 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, - 0x48, 0xd3, 0x23, 0x9c, 0xf9, 0x88, 0x2b, 0x63, - 0xc7, 0x33, 0x0f, 0xa3, 0x64, 0xf2, 0xdb, 0x39, - 0x73, 0x5f, 0x2b, 0xa8, 0xd5, 0x7b, 0x5c, 0x31, - 0x68, 0xc9, 0x63, 0x37, 0x5c, 0xe7, 0x41, 0x24, - 0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, - 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, - 0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, - 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x73, 0x69, + 0x00, 0x00, 0x00, 0x10, 0x00, 0xb0, 0x1c, 0x3b, 0x9e, 0x0d, 0x9a, 0xc0, 0x80, 0x0a, 0x08, 0x42, + 0x50, 0x02, 0xa3, 0xea, 0xdb, 0xed, 0xc8, 0xd0, 0xad, 0x35, 0x03, 0xd8, 0x0e, 0x11, 0x3c, 0x7b, + 0xb2, 0xb5, 0x20, 0xe5, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x5b, 0xce, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, + 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, + 0x48, 0xd3, 0x23, 0x9c, 0xf9, 0x88, 0x2b, 0x63, 0xc7, 0x33, 0x0f, 0xa3, 0x64, 0xf2, 0xdb, 0x39, + 0x73, 0x5f, 0x2b, 0xa8, 0xd5, 0x7b, 0x5c, 0x31, 0x68, 
0xc9, 0x63, 0x37, 0x5c, 0xe7, 0x41, 0x24, + 0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, + 0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x73, 0x69, 0x6d, 0x6e, 0x65, 0x74, } // devnetGenesisBlockBytes are the wire encoded bytes for the genesis block of // the development network as of protocol version 1. var devnetGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x16, 0x0a, 0xc6, - 0x8b, 0x77, 0x08, 0xf4, 0x96, 0xa3, 0x07, 0x05, - 0xbc, 0x92, 0xda, 0xee, 0x73, 0x26, 0x5e, 0xd0, - 0x85, 0x78, 0xa2, 0x5d, 0x02, 0x49, 0x8a, 0x2a, - 0x22, 0xef, 0x41, 0xc9, 0xc3, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xe7, 0x15, - 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x1e, 0xac, 0x82, 0x02, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, - 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, - 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, - 0xc7, 0x0c, 0x02, 0x9e, 0xb2, 0x2e, 0xb3, 0xad, - 0x24, 0x10, 0xfe, 0x2c, 0xdb, 0x8e, 0x1d, 0xde, - 0x81, 0x5b, 0xbb, 0x42, 0xfe, 0xb4, 0x93, 0xd6, - 0xe3, 0xbe, 0x86, 0x02, 0xe6, 0x3a, 0x65, 0x24, - 0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, - 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, - 0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, - 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x64, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x16, 0x0a, 0xc6, 0x8b, 0x77, 0x08, 0xf4, 0x96, 0xa3, 0x07, 0x05, + 0xbc, 0x92, 0xda, 0xee, 0x73, 0x26, 0x5e, 0xd0, 0x85, 0x78, 0xa2, 0x5d, 0x02, 0x49, 0x8a, 0x2a, + 0x22, 0xef, 0x41, 0xc9, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5b, 0xce, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x1e, 0xe6, 0x27, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0x0e, 0x00, 0x00, 0x0b, 0x2f, 0x50, 0x32, 0x53, 0x48, 0x2f, 0x62, 0x74, 0x63, + 0x64, 0x2f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+	0xc7, 0x0c, 0x02, 0x9e, 0xb2, 0x2e, 0xb3, 0xad, 0x24, 0x10, 0xfe, 0x2c, 0xdb, 0x8e, 0x1d, 0xde,
+	0x81, 0x5b, 0xbb, 0x42, 0xfe, 0xb4, 0x93, 0xd6, 0xe3, 0xbe, 0x86, 0x02, 0xe6, 0x3a, 0x65, 0x24,
+	0x17, 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71,
+	0xc7, 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x64, 0x65,
 	0x76, 0x6e, 0x65, 0x74,
 }

From a4c189862492119d638bac5bbaf5649a338693c1 Mon Sep 17 00:00:00 2001
From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Date: Sun, 31 May 2020 10:50:46 +0300
Subject: [PATCH 41/77] [NOD-1012] Disable subnetworks (#731)

* [NOD-1012] Disallow non-native/coinbase transactions.

* [NOD-1012] Fix logic error.

* [NOD-1012] Fix/skip tests and remove --subnetwork.

* [NOD-1012] Disconnect from non-native peers.

* [NOD-1012] Don't skip subnetwork tests.

* [NOD-1012] Use EnableNonNativeSubnetworks in peer.go.

* [NOD-1012] Set EnableNonNativeSubnetworks = true in the tests that need
them rather than by default in Simnet.
---
 blockdag/error.go             |  3 +++
 blockdag/external_dag_test.go |  2 ++
 blockdag/validate.go          | 10 ++++++++++
 config/config.go              | 10 ----------
 dagconfig/params.go           | 18 ++++++++++++++++++
 mempool/mempool.go            |  8 ++++++++
 mempool/mempool_test.go       |  1 +
 peer/example_test.go          |  3 +++
 peer/peer.go                  |  6 ++++++
 peer/peer_test.go             | 13 +++++++++----
 10 files changed, 60 insertions(+), 14 deletions(-)

diff --git a/blockdag/error.go b/blockdag/error.go
index cc86f7bb6..bd3e27a02 100644
--- a/blockdag/error.go
+++ b/blockdag/error.go
@@ -69,6 +69,9 @@ const (
 	// the expected value.
 	ErrBadUTXOCommitment
 
+	// ErrInvalidSubnetwork indicates the subnetwork is not allowed.
+	ErrInvalidSubnetwork
+
 	// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
 	// last finality point.
 	ErrFinalityPointTimeTooOld
diff --git a/blockdag/external_dag_test.go b/blockdag/external_dag_test.go
index 64275aa1b..2910a59fa 100644
--- a/blockdag/external_dag_test.go
+++ b/blockdag/external_dag_test.go
@@ -186,6 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
 	params := dagconfig.SimnetParams
 	params.K = 1
 	params.BlockCoinbaseMaturity = 0
+	params.EnableNonNativeSubnetworks = true
 	dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
 		DAGParams: &params,
 	})
@@ -410,6 +411,7 @@ func TestGasLimit(t *testing.T) {
 	params := dagconfig.SimnetParams
 	params.K = 1
 	params.BlockCoinbaseMaturity = 0
+	params.EnableNonNativeSubnetworks = true
 	dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
 		DAGParams: &params,
 	})
diff --git a/blockdag/validate.go b/blockdag/validate.go
index a42dad058..e1321e681 100644
--- a/blockdag/validate.go
+++ b/blockdag/validate.go
@@ -509,6 +509,16 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t
 		}
 	}
 
+	// Disallow non-native/coinbase subnetworks in networks that don't allow them
+	if !dag.dagParams.EnableNonNativeSubnetworks {
+		for _, tx := range transactions {
+			if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
+				tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
+				return 0, ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed")
+			}
+		}
+	}
+
 	// Do some preliminary checks on each transaction to ensure they are
 	// sane before continuing.
for _, tx := range transactions { diff --git a/config/config.go b/config/config.go index 0b1e58569..548a8e161 100644 --- a/config/config.go +++ b/config/config.go @@ -127,7 +127,6 @@ type Flags struct { DropAcceptanceIndex bool `long:"dropacceptanceindex" description:"Deletes the hash-based acceptance index from the database on start up and then exits."` RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."` RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."` - Subnetwork string `long:"subnetwork" description:"If subnetwork ID is specified, than node will request and process only payloads from specified subnetwork. And if subnetwork ID is ommited, than payloads of all subnetworks are processed. Subnetworks with IDs 2 through 255 are reserved for future use and are currently not allowed."` ResetDatabase bool `long:"reset-db" description:"Reset database before starting node. It's needed when switching between subnetworks."` NetworkFlags } @@ -629,15 +628,6 @@ func loadConfig() (*Config, []string, error) { activeConfig.MiningAddrs = append(activeConfig.MiningAddrs, addr) } - if activeConfig.Flags.Subnetwork != "" { - activeConfig.SubnetworkID, err = subnetworkid.NewFromStr(activeConfig.Flags.Subnetwork) - if err != nil { - return nil, nil, err - } - } else { - activeConfig.SubnetworkID = nil - } - // Add default port to all listener addresses if needed and remove // duplicate addresses. activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners, diff --git a/dagconfig/params.go b/dagconfig/params.go index bdcbebf70..6ca677cbe 100644 --- a/dagconfig/params.go +++ b/dagconfig/params.go @@ -176,6 +176,9 @@ type Params struct { // Address encoding magics PrivateKeyID byte // First byte of a WIF private key + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks bool } // NormalizeRPCServerAddress returns addr with the current network default @@ -230,6 +233,9 @@ var MainnetParams = Params{ // Address encoding magics PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, } // RegressionNetParams defines the network parameters for the regression test @@ -280,6 +286,9 @@ var RegressionNetParams = Params{ // Address encoding magics PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, } // TestnetParams defines the network parameters for the test Kaspa network. @@ -328,6 +337,9 @@ var TestnetParams = Params{ // Address encoding magics PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, } // SimnetParams defines the network parameters for the simulation test Kaspa @@ -380,6 +392,9 @@ var SimnetParams = Params{ PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed) // Human-readable part for Bech32 encoded addresses Prefix: util.Bech32PrefixKaspaSim, + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, } // DevnetParams defines the network parameters for the development Kaspa network. 
@@ -428,6 +443,9 @@ var DevnetParams = Params{
 
 	// Address encoding magics
 	PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
+
+	// EnableNonNativeSubnetworks enables non-native/coinbase transactions
+	EnableNonNativeSubnetworks: false,
 }
 
 var (
diff --git a/mempool/mempool.go b/mempool/mempool.go
index 9a3395629..72293a1dc 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -812,6 +812,14 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
 		return nil, nil, txRuleError(wire.RejectInvalid, str)
 	}
 
+	// Disallow non-native/coinbase subnetworks in networks that don't allow them
+	if !mp.cfg.DAGParams.EnableNonNativeSubnetworks {
+		if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
+			tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
+			return nil, nil, txRuleError(wire.RejectInvalid, "non-native/coinbase subnetworks are not allowed")
+		}
+	}
+
 	// Perform preliminary sanity checks on the transaction. This makes
 	// use of blockDAG which contains the invariant rules for what
 	// transactions are allowed into blocks.
diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go
index 1e8ae7337..19ea66c03 100644
--- a/mempool/mempool_test.go
+++ b/mempool/mempool_test.go
@@ -1811,6 +1811,7 @@ var dummyBlock = wire.MsgBlock{
 func TestTransactionGas(t *testing.T) {
 	params := dagconfig.SimnetParams
 	params.BlockCoinbaseMaturity = 0
+	params.EnableNonNativeSubnetworks = true
 	tc, spendableOuts, teardownFunc, err := newPoolHarness(t, &params, 6, "TestTransactionGas")
 	if err != nil {
 		t.Fatalf("unable to create test pool: %v", err)
diff --git a/peer/example_test.go b/peer/example_test.go
index 8bcbc425a..32daad9ea 100644
--- a/peer/example_test.go
+++ b/peer/example_test.go
@@ -6,6 +6,7 @@ package peer_test
 
 import (
 	"fmt"
+	"github.com/kaspanet/kaspad/util/subnetworkid"
 	"net"
 	"time"
 
@@ -31,6 +32,7 @@ func mockRemotePeer() error {
 		UserAgentVersion: "1.0.0", // User agent version to advertise.
 		DAGParams:        &dagconfig.SimnetParams,
 		SelectedTipHash:  fakeSelectedTipFn,
+		SubnetworkID:     subnetworkid.SubnetworkIDNative,
 	}
 
 	// Accept connections on the simnet port.
@@ -90,6 +92,7 @@ func Example_newOutboundPeer() { }, }, SelectedTipHash: fakeSelectedTipFn, + SubnetworkID: subnetworkid.SubnetworkIDNative, } p, err := peer.NewOutboundPeer(peerCfg, "127.0.0.1:18555") if err != nil { diff --git a/peer/peer.go b/peer/peer.go index fe23d1d4a..7d4f4830e 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -897,6 +897,12 @@ func (p *Peer) handleRemoteVersionMsg(msg *wire.MsgVersion) error { return errors.New(reason) } + // Disconnect from non-native/coinbase subnetworks in networks that don't allow them + if !p.cfg.DAGParams.EnableNonNativeSubnetworks && + !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) { + return errors.New("non-native subnetworks are not allowed") + } + // Disconnect if: // - we are a full node and the outbound connection we've initiated is a partial node // - the remote node is partial and our subnetwork doesn't match their subnetwork diff --git a/peer/peer_test.go b/peer/peer_test.go index fbc8942c0..d8ddec170 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -5,6 +5,7 @@ package peer import ( + "github.com/kaspanet/kaspad/util/subnetworkid" "io" "net" "strconv" @@ -223,6 +224,7 @@ func TestPeerConnection(t *testing.T) { ProtocolVersion: wire.ProtocolVersion, // Configure with older version Services: 0, SelectedTipHash: fakeSelectedTipFn, + SubnetworkID: subnetworkid.SubnetworkIDNative, } outPeerCfg := &Config{ Listeners: MessageListeners{ @@ -243,6 +245,7 @@ func TestPeerConnection(t *testing.T) { ProtocolVersion: wire.ProtocolVersion + 1, Services: wire.SFNodeNetwork, SelectedTipHash: fakeSelectedTipFn, + SubnetworkID: subnetworkid.SubnetworkIDNative, } wantStats1 := peerStats{ @@ -256,8 +259,8 @@ func TestPeerConnection(t *testing.T) { wantLastPingNonce: uint64(0), wantLastPingMicros: int64(0), wantTimeOffset: int64(0), - wantBytesSent: 195, // 171 version + 24 verack - wantBytesReceived: 195, + wantBytesSent: 215, // 191 version + 24 verack + wantBytesReceived: 215, } wantStats2 := peerStats{ wantUserAgent: wire.DefaultUserAgent + "peer:1.0(comment)/", @@ -270,8 +273,8 @@ func TestPeerConnection(t *testing.T) { wantLastPingNonce: uint64(0), wantLastPingMicros: int64(0), wantTimeOffset: int64(0), - wantBytesSent: 195, // 171 version + 24 verack - wantBytesReceived: 195, + wantBytesSent: 215, // 191 version + 24 verack + wantBytesReceived: 215, } tests := []struct { @@ -401,6 +404,7 @@ func TestPeerListeners(t *testing.T) { DAGParams: &dagconfig.MainnetParams, Services: wire.SFNodeBloom, SelectedTipHash: fakeSelectedTipFn, + SubnetworkID: subnetworkid.SubnetworkIDNative, } outPeerCfg := &Config{} @@ -517,6 +521,7 @@ func TestOutboundPeer(t *testing.T) { UserAgentComments: []string{"comment"}, DAGParams: &dagconfig.MainnetParams, Services: 0, + SubnetworkID: subnetworkid.SubnetworkIDNative, } _, p, err := setupPeers(peerCfg, peerCfg) From 3a22249be904a2965d9419d74e75998c85fc78fb Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 31 May 2020 14:13:30 +0300 Subject: [PATCH 42/77] [NOD-1012] Fix erroneous partial node check (#739) * [NOD-1012] Fix bad partial node check. * [NOD-1012] Fix unit tests. 
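
The erroneous check compared the remote peer's subnetwork against
SubnetworkIDNative, but a full node does not advertise a subnetwork at all:
in the version message, SubnetworkID is nil for full nodes and non-nil only
for partial nodes. As a rough illustration of the corrected rule (not part
of this patch; the helper name isAllowedRemote is hypothetical), the check
installed in handleRemoteVersionMsg behaves like this:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/subnetworkid"
	"github.com/kaspanet/kaspad/wire"
)

// isAllowedRemote is a hypothetical helper mirroring the corrected check:
// on networks where non-native subnetworks are disabled, only full nodes
// (nil SubnetworkID) may stay connected.
func isAllowedRemote(params *dagconfig.Params, msg *wire.MsgVersion) bool {
	return params.EnableNonNativeSubnetworks || msg.SubnetworkID == nil
}

func main() {
	fullNode := &wire.MsgVersion{SubnetworkID: nil}
	// Any non-nil ID marks a partial node; the value below is arbitrary.
	partialNode := &wire.MsgVersion{SubnetworkID: &subnetworkid.SubnetworkID{42}}
	fmt.Println(isAllowedRemote(&dagconfig.MainnetParams, fullNode))    // true
	fmt.Println(isAllowedRemote(&dagconfig.MainnetParams, partialNode)) // false
}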
--- peer/example_test.go | 5 ++--- peer/peer.go | 7 +++---- peer/peer_test.go | 17 ++++++++--------- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/peer/example_test.go b/peer/example_test.go index 32daad9ea..11ce44191 100644 --- a/peer/example_test.go +++ b/peer/example_test.go @@ -6,7 +6,6 @@ package peer_test import ( "fmt" - "github.com/kaspanet/kaspad/util/subnetworkid" "net" "time" @@ -32,7 +31,7 @@ func mockRemotePeer() error { UserAgentVersion: "1.0.0", // User agent version to advertise. DAGParams: &dagconfig.SimnetParams, SelectedTipHash: fakeSelectedTipFn, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } // Accept connections on the simnet port. @@ -92,7 +91,7 @@ func Example_newOutboundPeer() { }, }, SelectedTipHash: fakeSelectedTipFn, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } p, err := peer.NewOutboundPeer(peerCfg, "127.0.0.1:18555") if err != nil { diff --git a/peer/peer.go b/peer/peer.go index 7d4f4830e..8f9838cc7 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -897,10 +897,9 @@ func (p *Peer) handleRemoteVersionMsg(msg *wire.MsgVersion) error { return errors.New(reason) } - // Disconnect from non-native/coinbase subnetworks in networks that don't allow them - if !p.cfg.DAGParams.EnableNonNativeSubnetworks && - !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) { - return errors.New("non-native subnetworks are not allowed") + // Disconnect from partial nodes in networks that don't allow them + if !p.cfg.DAGParams.EnableNonNativeSubnetworks && msg.SubnetworkID != nil { + return errors.New("partial nodes are not allowed") } // Disconnect if: diff --git a/peer/peer_test.go b/peer/peer_test.go index d8ddec170..a28e178ce 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -5,7 +5,6 @@ package peer import ( - "github.com/kaspanet/kaspad/util/subnetworkid" "io" "net" "strconv" @@ -224,7 +223,7 @@ func TestPeerConnection(t *testing.T) { ProtocolVersion: wire.ProtocolVersion, // Configure with older version Services: 0, SelectedTipHash: fakeSelectedTipFn, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } outPeerCfg := &Config{ Listeners: MessageListeners{ @@ -245,7 +244,7 @@ func TestPeerConnection(t *testing.T) { ProtocolVersion: wire.ProtocolVersion + 1, Services: wire.SFNodeNetwork, SelectedTipHash: fakeSelectedTipFn, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } wantStats1 := peerStats{ @@ -259,8 +258,8 @@ func TestPeerConnection(t *testing.T) { wantLastPingNonce: uint64(0), wantLastPingMicros: int64(0), wantTimeOffset: int64(0), - wantBytesSent: 215, // 191 version + 24 verack - wantBytesReceived: 215, + wantBytesSent: 195, // 171 version + 24 verack + wantBytesReceived: 195, } wantStats2 := peerStats{ wantUserAgent: wire.DefaultUserAgent + "peer:1.0(comment)/", @@ -273,8 +272,8 @@ func TestPeerConnection(t *testing.T) { wantLastPingNonce: uint64(0), wantLastPingMicros: int64(0), wantTimeOffset: int64(0), - wantBytesSent: 215, // 191 version + 24 verack - wantBytesReceived: 215, + wantBytesSent: 195, // 171 version + 24 verack + wantBytesReceived: 195, } tests := []struct { @@ -404,7 +403,7 @@ func TestPeerListeners(t *testing.T) { DAGParams: &dagconfig.MainnetParams, Services: wire.SFNodeBloom, SelectedTipHash: fakeSelectedTipFn, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } outPeerCfg := &Config{} @@ -521,7 +520,7 @@ func TestOutboundPeer(t *testing.T) { UserAgentComments: []string{"comment"}, DAGParams: 
&dagconfig.MainnetParams, Services: 0, - SubnetworkID: subnetworkid.SubnetworkIDNative, + SubnetworkID: nil, } _, p, err := setupPeers(peerCfg, peerCfg) From 2d798a5611b3a141384e5fa2cc198571449d4497 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 1 Jun 2020 14:09:18 +0300 Subject: [PATCH 43/77] [NOD-1020] Do send addr response to getaddr messages even if there aren't any addresses to send. (#740) --- peer/peer.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/peer/peer.go b/peer/peer.go index 8f9838cc7..d5288f6ae 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -755,18 +755,12 @@ func (p *Peer) localVersionMsg() (*wire.MsgVersion, error) { // addresses. This function is useful over manually sending the message via // QueueMessage since it automatically limits the addresses to the maximum // number allowed by the message and randomizes the chosen addresses when there -// are too many. It returns the addresses that were actually sent and no -// message will be sent if there are no entries in the provided addresses slice. +// are too many. It returns the addresses that were actually sent. // // This function is safe for concurrent access. func (p *Peer) PushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) ([]*wire.NetAddress, error) { addressCount := len(addresses) - // Nothing to send. - if addressCount == 0 { - return nil, nil - } - msg := wire.NewMsgAddr(false, subnetworkID) msg.AddrList = make([]*wire.NetAddress, addressCount) copy(msg.AddrList, addresses) From 95c8b8e9d86a6f636348167476968af7818e2f0c Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Wed, 3 Jun 2020 16:04:14 +0300 Subject: [PATCH 44/77] [NOD-1023] Rename isCurrent/current to isSynced/synced (#742) * [NOD-1023] Rename BlockDAG.isCurrent to isSynced. * [NOD-1023] Rename SyncManager.current to synced. * [NOD-1023] Fix comments. --- blockdag/dag.go | 22 +++++++++++----------- blockdag/thresholdstate.go | 4 ++-- netsync/manager.go | 18 +++++++++--------- server/p2p/on_version.go | 2 +- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index df925c962..5d0796d31 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -551,8 +551,8 @@ func (node *blockNode) validateAcceptedIDMerkleRoot(dag *BlockDAG, txsAcceptance func (dag *BlockDAG) connectBlock(node *blockNode, block *util.Block, selectedParentAnticone []*blockNode, fastAdd bool) (*chainUpdates, error) { // No warnings about unknown rules or versions until the DAG is - // current. - if dag.isCurrent() { + // synced. + if dag.isSynced() { // Warn if any unknown new rules are either about to activate or // have already been activated. if err := dag.warnUnknownRuleActivations(node); err != nil { @@ -1310,18 +1310,18 @@ func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error { return nil } -// isCurrent returns whether or not the DAG believes it is current. Several +// isSynced returns whether or not the DAG believes it is synced. Several // factors are used to guess, but the key factors that allow the DAG to -// believe it is current are: +// believe it is synced are: // - Latest block has a timestamp newer than 24 hours ago // // This function MUST be called with the DAG state lock held (for reads). 
-func (dag *BlockDAG) isCurrent() bool {
-	// Not current if the virtual's selected parent has a timestamp
+func (dag *BlockDAG) isSynced() bool {
+	// Not synced if the virtual's selected parent has a timestamp
 	// before 24 hours ago. If the DAG is empty, we take the genesis
 	// block timestamp.
 	//
-	// The DAG appears to be current if none of the checks reported
+	// The DAG appears to be synced if none of the checks reported
 	// otherwise.
 	var dagTimestamp int64
 	selectedTip := dag.selectedTip()
@@ -1341,17 +1341,17 @@ func (dag *BlockDAG) Now() time.Time {
 	return dag.timeSource.Now()
 }
 
-// IsCurrent returns whether or not the DAG believes it is current. Several
+// IsSynced returns whether or not the DAG believes it is synced. Several
 // factors are used to guess, but the key factors that allow the DAG to
-// believe it is current are:
+// believe it is synced are:
 //  - Latest block has a timestamp newer than 24 hours ago
 //
 // This function is safe for concurrent access.
-func (dag *BlockDAG) IsCurrent() bool {
+func (dag *BlockDAG) IsSynced() bool {
 	dag.dagLock.RLock()
 	defer dag.dagLock.RUnlock()
 
-	return dag.isCurrent()
+	return dag.isSynced()
 }
 
 // selectedTip returns the current selected tip for the DAG.
diff --git a/blockdag/thresholdstate.go b/blockdag/thresholdstate.go
index 506dc67ee..640b1f4a5 100644
--- a/blockdag/thresholdstate.go
+++ b/blockdag/thresholdstate.go
@@ -336,8 +336,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
 	}
 
 	// No warnings about unknown rules or versions until the DAG is
-	// current.
-	if dag.isCurrent() {
+	// synced.
+	if dag.isSynced() {
 		// Warn if a high enough percentage of the last blocks have
 		// unexpected versions.
 		bestNode := dag.selectedTip()
diff --git a/netsync/manager.go b/netsync/manager.go
index fc43d7fbe..7d27c30ad 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -417,15 +417,15 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
 	sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
 }
 
-// current returns true if we believe we are synced with our peers, false if we
+// synced returns true if we believe we are synced with our peers, false if we
 // still have blocks to check
 //
-// We consider ourselves current iff both of the following are true:
+// We consider ourselves synced iff both of the following are true:
 // 1. there's no syncPeer, a.k.a. all connected peers are at the same tip
-// 2. the DAG considers itself current - to prevent attacks where a peer sends an
+// 2. the DAG considers itself synced - to prevent attacks where a peer sends an
 // unknown tip but never lets us sync to it.
-func (sm *SyncManager) current() bool {
-	return sm.syncPeer == nil && sm.dag.IsCurrent()
+func (sm *SyncManager) synced() bool {
+	return sm.syncPeer == nil && sm.dag.IsSynced()
 }
 
 // restartSyncIfNeeded finds a new sync candidate if we're not expecting any
@@ -754,7 +754,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
 		log.Errorf("Failed to send invs from queue: %s", err)
 	}
 
-	if haveUnknownInvBlock && !sm.current() {
+	if haveUnknownInvBlock && !sm.synced() {
 		// If one of the inv messages is an unknown block
 		// it is an indication that one of our peers has more
 		// up-to-date data than us.
@@ -1017,9 +1017,9 @@ func (sm *SyncManager) handleBlockDAGNotification(notification *blockdag.Notific
 			}
 		})
 
-		// Relay if we are current and the block was not just now unorphaned.
- // Otherwise peers that are current should already know about it - if sm.current() && !data.WasUnorphaned { + // Relay if we are synced and the block was not just now unorphaned. + // Otherwise peers that are synced should already know about it + if sm.synced() && !data.WasUnorphaned { iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) } diff --git a/server/p2p/on_version.go b/server/p2p/on_version.go index 0a553e3d0..ff0a18ec1 100644 --- a/server/p2p/on_version.go +++ b/server/p2p/on_version.go @@ -27,7 +27,7 @@ func (sp *Peer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) { if !sp.Inbound() { // TODO(davec): Only do this if not doing the initial block // download and the local address is routable. - if !config.ActiveConfig().DisableListen /* && isCurrent? */ { + if !config.ActiveConfig().DisableListen { // Get address that best matches. lna := addrManager.GetBestLocalAddress(sp.NA()) if addrmgr.IsRoutable(lna) { From d15c009b3c4ab434ebb98587b6c69bbb6cea6b82 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Thu, 4 Jun 2020 15:11:05 +0300 Subject: [PATCH 45/77] [NOD-1030] Disconnect from syncPeers that send orphan blocks (#744) * [NOD-1030] Disconnect from syncPeers that send orphan blocks. * [NOD-1030] Remove debug log. * [NOD-1030] Remove unnecessary call to stopSyncFromPeer. --- netsync/manager.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/netsync/manager.go b/netsync/manager.go index fc43d7fbe..31923cc62 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -529,8 +529,17 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { return } - // Request the parents for the orphan block from the peer that sent it. if isOrphan { + // If we received an orphan block from the sync peer, it is + // misbehaving and must be disconnected from. + if peer == sm.syncPeer { + log.Errorf("Received an orphan block %s from sync peer %s. Disconnecting...", + blockHash, peer) + peer.Disconnect() + return + } + + // Request the parents for the orphan block from the peer that sent it. missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(blockHash) if err != nil { log.Errorf("Failed to find missing ancestors for block %s: %s", From 96930bd6eaca40ff5a47d8bfe8e1c3a078d76057 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 7 Jun 2020 09:19:28 +0300 Subject: [PATCH 46/77] [NOD-1039] Remove the call to SetGCPercent. (#745) --- kaspad.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/kaspad.go b/kaspad.go index 7293f0036..5d33de171 100644 --- a/kaspad.go +++ b/kaspad.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "runtime" - "runtime/debug" "runtime/pprof" "strings" "time" @@ -265,12 +264,6 @@ func main() { // Use all processor cores. runtime.GOMAXPROCS(runtime.NumCPU()) - // Block and transaction processing can cause bursty allocations. This - // limits the garbage collector from excessively overallocating during - // bursts. This value was arrived at with the help of profiling live - // usage. - debug.SetGCPercent(10) - // Up some limits. 
if err := limits.SetLimits(); err != nil { fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err) From 35c733a4c1881873118c84adc823f2ec80ce5e28 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 7 Jun 2020 16:31:17 +0300 Subject: [PATCH 47/77] [NOD-970] Add isSyncing flag (#747) * [NOD-970] Add isSyncing flag * [NOD-970] Rename shouldSendSelectedTip->peerShouldSendSelectedTip --- netsync/manager.go | 56 +++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index 31923cc62..df11785b1 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -132,13 +132,13 @@ type requestQueueAndSet struct { // peerSyncState stores additional information that the SyncManager tracks // about a peer. type peerSyncState struct { - syncCandidate bool - lastSelectedTipRequest time.Time - isPendingForSelectedTip bool - requestQueueMtx sync.Mutex - requestQueues map[wire.InvType]*requestQueueAndSet - requestedTxns map[daghash.TxID]struct{} - requestedBlocks map[daghash.Hash]struct{} + syncCandidate bool + lastSelectedTipRequest time.Time + peerShouldSendSelectedTip bool + requestQueueMtx sync.Mutex + requestQueues map[wire.InvType]*requestQueueAndSet + requestedTxns map[daghash.TxID]struct{} + requestedBlocks map[daghash.Hash]struct{} } // SyncManager is used to communicate block related messages with peers. The @@ -158,6 +158,7 @@ type SyncManager struct { wg sync.WaitGroup quit chan struct{} syncPeerLock sync.Mutex + isSyncing bool // These fields should only be accessed from the messageHandler thread rejectedTxns map[daghash.TxID]struct{} @@ -206,13 +207,21 @@ func (sm *SyncManager) startSync() { syncPeer.SelectedTipHash(), syncPeer.Addr()) syncPeer.PushGetBlockLocatorMsg(syncPeer.SelectedTipHash(), sm.dagParams.GenesisHash) + sm.isSyncing = true sm.syncPeer = syncPeer return } + pendingForSelectedTips := false + if sm.shouldQueryPeerSelectedTips() { + sm.isSyncing = true hasSyncCandidates := false for peer, state := range sm.peerStates { + if state.peerShouldSendSelectedTip { + pendingForSelectedTips = true + continue + } if !state.syncCandidate { continue } @@ -222,21 +231,26 @@ func (sm *SyncManager) startSync() { continue } - queueMsgGetSelectedTip(peer, state) + sm.queueMsgGetSelectedTip(peer, state) + pendingForSelectedTips = true } if !hasSyncCandidates { log.Warnf("No sync peer candidates available") } } + + if !pendingForSelectedTips { + sm.isSyncing = false + } } func (sm *SyncManager) shouldQueryPeerSelectedTips() bool { return sm.dag.Now().Sub(sm.dag.CalcPastMedianTime()) > minDAGTimeDelay } -func queueMsgGetSelectedTip(peer *peerpkg.Peer, state *peerSyncState) { +func (sm *SyncManager) queueMsgGetSelectedTip(peer *peerpkg.Peer, state *peerSyncState) { state.lastSelectedTipRequest = time.Now() - state.isPendingForSelectedTip = true + state.peerShouldSendSelectedTip = true peer.QueueMessage(wire.NewMsgGetSelectedTip(), nil) } @@ -417,17 +431,6 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) } -// current returns true if we believe we are synced with our peers, false if we -// still have blocks to check -// -// We consider ourselves current iff both of the following are true: -// 1. there's no syncPeer, a.k.a. all connected peers are at the same tip -// 2. the DAG considers itself current - to prevent attacks where a peer sends an -// unknown tip but never lets us sync to it. 
-func (sm *SyncManager) current() bool { - return sm.syncPeer == nil && sm.dag.IsCurrent() -} - // restartSyncIfNeeded finds a new sync candidate if we're not expecting any // blocks from the current one. func (sm *SyncManager) restartSyncIfNeeded() { @@ -763,7 +766,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { log.Errorf("Failed to send invs from queue: %s", err) } - if haveUnknownInvBlock && !sm.current() { + if haveUnknownInvBlock && !sm.isSyncing { // If one of the inv messages is an unknown block // it is an indication that one of our peers has more // up-to-date data than us. @@ -848,7 +851,8 @@ func (sm *SyncManager) sendInvsFromRequestQueue(peer *peerpkg.Peer, state *peerS if err != nil { return err } - if sm.syncPeer == nil || sm.isSynced() { + if !sm.isSyncing || sm.isSynced() { + log.Criticalf("wtf? sm.isSyncing: %t sm.isSynced: %t", sm.isSyncing, sm.isSynced()) err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) if err != nil { return err @@ -918,12 +922,12 @@ func (sm *SyncManager) handleSelectedTipMsg(msg *selectedTipMsg) { peer := msg.peer selectedTipHash := msg.selectedTipHash state := sm.peerStates[peer] - if !state.isPendingForSelectedTip { + if !state.peerShouldSendSelectedTip { log.Warnf("Got unrequested selected tip message from %s -- "+ "disconnecting", peer.Addr()) peer.Disconnect() } - state.isPendingForSelectedTip = false + state.peerShouldSendSelectedTip = false if selectedTipHash.IsEqual(peer.SelectedTipHash()) { return } @@ -1028,7 +1032,7 @@ func (sm *SyncManager) handleBlockDAGNotification(notification *blockdag.Notific // Relay if we are current and the block was not just now unorphaned. // Otherwise peers that are current should already know about it - if sm.current() && !data.WasUnorphaned { + if sm.isSynced() && !data.WasUnorphaned { iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) } From 9c78a797e484b1b3bc0e9aadd9b1421702a4648a Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 7 Jun 2020 16:35:48 +0300 Subject: [PATCH 48/77] [NOD-1041] Call outboundPeerConnected and outboundPeerConnectionFailed directly instead of routing them through peerHandler (#748) * [NOD-1041] Fix a deadlock between connHandler and peerHandler. * [NOD-1041] Simplified the fix. --- server/p2p/p2p.go | 120 ++++++++++++++++++---------------------------- 1 file changed, 46 insertions(+), 74 deletions(-) diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 4bf5cb03e..8b1dacdf6 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -109,15 +109,6 @@ type relayMsg struct { data interface{} } -type outboundPeerConnectedMsg struct { - connReq *connmgr.ConnReq - conn net.Conn -} - -type outboundPeerConnectionFailedMsg struct { - connReq *connmgr.ConnReq -} - // Peer extends the peer to maintain state shared by the server and // the blockmanager. 
type Peer struct { @@ -229,19 +220,17 @@ type Server struct { DAG *blockdag.BlockDAG TxMemPool *mempool.TxPool - modifyRebroadcastInv chan interface{} - newPeers chan *Peer - donePeers chan *Peer - banPeers chan *Peer - newOutboundConnection chan *outboundPeerConnectedMsg - newOutboundConnectionFailed chan *outboundPeerConnectionFailedMsg - Query chan interface{} - relayInv chan relayMsg - broadcast chan broadcastMsg - wg sync.WaitGroup - nat serverutils.NAT - TimeSource blockdag.TimeSource - services wire.ServiceFlag + modifyRebroadcastInv chan interface{} + newPeers chan *Peer + donePeers chan *Peer + banPeers chan *Peer + Query chan interface{} + relayInv chan relayMsg + broadcast chan broadcastMsg + wg sync.WaitGroup + nat serverutils.NAT + TimeSource blockdag.TimeSource + services wire.ServiceFlag // We add to quitWaitGroup before every instance in which we wait for // the quit channel so that all those instances finish before we shut @@ -977,17 +966,17 @@ func (s *Server) inboundPeerConnected(conn net.Conn) { // peer instance, associates it with the relevant state such as the connection // request instance and the connection itself, and finally notifies the address // manager of the attempt. -func (s *Server) outboundPeerConnected(state *peerState, msg *outboundPeerConnectedMsg) { - sp := newServerPeer(s, msg.connReq.Permanent) - outboundPeer, err := peer.NewOutboundPeer(newPeerConfig(sp), msg.connReq.Addr.String()) +func (s *Server) outboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) { + sp := newServerPeer(s, connReq.Permanent) + outboundPeer, err := peer.NewOutboundPeer(newPeerConfig(sp), connReq.Addr.String()) if err != nil { - srvrLog.Debugf("Cannot create outbound peer %s: %s", msg.connReq.Addr, err) - s.connManager.Disconnect(msg.connReq.ID()) + srvrLog.Debugf("Cannot create outbound peer %s: %s", connReq.Addr, err) + s.connManager.Disconnect(connReq.ID()) } sp.Peer = outboundPeer - sp.connReq = msg.connReq + sp.connReq = connReq - s.peerConnected(sp, msg.conn) + s.peerConnected(sp, conn) s.addrManager.Attempt(sp.NA()) } @@ -1012,20 +1001,20 @@ func (s *Server) peerConnected(sp *Peer, conn net.Conn) { // outboundPeerConnected is invoked by the connection manager when a new // outbound connection failed to be established. -func (s *Server) outboundPeerConnectionFailed(msg *outboundPeerConnectionFailedMsg) { +func (s *Server) outboundPeerConnectionFailed(connReq *connmgr.ConnReq) { // If the connection request has no address // associated to it, do nothing. 
- if msg.connReq.Addr == nil { + if connReq.Addr == nil { return } - host, portStr, err := net.SplitHostPort(msg.connReq.Addr.String()) + host, portStr, err := net.SplitHostPort(connReq.Addr.String()) if err != nil { - srvrLog.Debugf("Cannot extract address host and port %s: %s", msg.connReq.Addr, err) + srvrLog.Debugf("Cannot extract address host and port %s: %s", connReq.Addr, err) } port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { - srvrLog.Debugf("Cannot parse port %s: %s", msg.connReq.Addr, err) + srvrLog.Debugf("Cannot parse port %s: %s", connReq.Addr, err) } // defaultServices is used here because Attempt makes no use @@ -1137,12 +1126,6 @@ out: }) s.quitWaitGroup.Done() break out - - case opcMsg := <-s.newOutboundConnection: - s.outboundPeerConnected(state, opcMsg) - - case opcfMsg := <-s.newOutboundConnectionFailed: - s.outboundPeerConnectionFailed(opcfMsg) } } @@ -1497,23 +1480,21 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch maxPeers := config.ActiveConfig().TargetOutboundPeers + config.ActiveConfig().MaxInboundPeers s := Server{ - DAGParams: dagParams, - addrManager: amgr, - newPeers: make(chan *Peer, maxPeers), - donePeers: make(chan *Peer, maxPeers), - banPeers: make(chan *Peer, maxPeers), - Query: make(chan interface{}), - relayInv: make(chan relayMsg, maxPeers), - broadcast: make(chan broadcastMsg, maxPeers), - quit: make(chan struct{}), - modifyRebroadcastInv: make(chan interface{}), - newOutboundConnection: make(chan *outboundPeerConnectedMsg, config.ActiveConfig().TargetOutboundPeers), - newOutboundConnectionFailed: make(chan *outboundPeerConnectionFailedMsg, config.ActiveConfig().TargetOutboundPeers), - nat: nat, - TimeSource: blockdag.NewTimeSource(), - services: services, - SigCache: txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize), - notifyNewTransactions: notifyNewTransactions, + DAGParams: dagParams, + addrManager: amgr, + newPeers: make(chan *Peer, maxPeers), + donePeers: make(chan *Peer, maxPeers), + banPeers: make(chan *Peer, maxPeers), + Query: make(chan interface{}), + relayInv: make(chan relayMsg, maxPeers), + broadcast: make(chan broadcastMsg, maxPeers), + quit: make(chan struct{}), + modifyRebroadcastInv: make(chan interface{}), + nat: nat, + TimeSource: blockdag.NewTimeSource(), + services: services, + SigCache: txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize), + notifyNewTransactions: notifyNewTransactions, } // Create indexes if needed. @@ -1576,23 +1557,14 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch // Create a connection manager. 
cmgr, err := connmgr.New(&connmgr.Config{ - Listeners: listeners, - OnAccept: s.inboundPeerConnected, - RetryDuration: connectionRetryInterval, - TargetOutbound: uint32(config.ActiveConfig().TargetOutboundPeers), - Dial: serverutils.KaspadDial, - OnConnection: func(c *connmgr.ConnReq, conn net.Conn) { - s.newOutboundConnection <- &outboundPeerConnectedMsg{ - connReq: c, - conn: conn, - } - }, - OnConnectionFailed: func(c *connmgr.ConnReq) { - s.newOutboundConnectionFailed <- &outboundPeerConnectionFailedMsg{ - connReq: c, - } - }, - AddrManager: s.addrManager, + Listeners: listeners, + OnAccept: s.inboundPeerConnected, + RetryDuration: connectionRetryInterval, + TargetOutbound: uint32(config.ActiveConfig().TargetOutboundPeers), + Dial: serverutils.KaspadDial, + OnConnection: s.outboundPeerConnected, + OnConnectionFailed: s.outboundPeerConnectionFailed, + AddrManager: s.addrManager, }) if err != nil { return nil, err From b4dba782fba79063c9893a3853d1d26cefdb8ffe Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 7 Jun 2020 17:50:57 +0300 Subject: [PATCH 49/77] [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500 (#746) * [NOD-1040] Don't remove DAG tips from the diffStore's loaded set * [NOD-1040] Fix TestClearOldEntries. * Revert "[NOD-1040] Fix TestClearOldEntries." This reverts commit e0705814 * Revert "[NOD-1040] Don't remove DAG tips from the diffStore's loaded set" This reverts commit d3eba1c1 * [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500. --- blockdag/utxodiffstore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockdag/utxodiffstore.go b/blockdag/utxodiffstore.go index 1fa88168f..dc910b9b6 100644 --- a/blockdag/utxodiffstore.go +++ b/blockdag/utxodiffstore.go @@ -153,7 +153,7 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() { // maxBlueScoreDifferenceToKeepLoaded is the maximum difference // between the virtual's blueScore and a blockNode's blueScore // under which to keep diff data loaded in memory. -var maxBlueScoreDifferenceToKeepLoaded uint64 = 100 +var maxBlueScoreDifferenceToKeepLoaded uint64 = 1500 // clearOldEntries removes entries whose blue score is lower than // virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. From 4a50d94633a52a5b87bce0ae08d751b45fff00c4 Mon Sep 17 00:00:00 2001 From: Mike Zak Date: Sun, 7 Jun 2020 17:54:30 +0300 Subject: [PATCH 50/77] Update to v0.4.1 --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index ee8407794..bac602838 100644 --- a/version/version.go +++ b/version/version.go @@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs const ( appMajor uint = 0 appMinor uint = 4 - appPatch uint = 0 + appPatch uint = 1 ) // appBuild is defined as a variable so it can be overridden during the build From 222477b33e813b8f59cfd882db52087971c433ed Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 8 Jun 2020 12:14:58 +0300 Subject: [PATCH 51/77] [NOD-1040] Don't remove DAG tips from the diffStore's loaded set (#750) * [NOD-1040] Don't remove DAG tips from the diffStore's loaded set * [NOD-1040] Remove a debug log. 
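
The resulting eviction rule, as a runnable sketch (illustrative only;
shouldEvict is a hypothetical stand-in for the loop body of clearOldEntries):

package main

import "fmt"

const maxBlueScoreDifferenceToKeepLoaded = 100

// shouldEvict reports whether a loaded diff entry may be dropped from memory:
// it must trail the virtual's blue score by more than the threshold AND not
// be a current DAG tip, since new blocks build on tips and need their diffs.
func shouldEvict(nodeBlueScore, virtualBlueScore uint64, isTip bool) bool {
	var minBlueScore uint64
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScore = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	return nodeBlueScore < minBlueScore && !isTip
}

func main() {
	fmt.Println(shouldEvict(850, 1000, false)) // true: stale non-tip is evicted
	fmt.Println(shouldEvict(850, 1000, true))  // false: tips always stay loaded
	fmt.Println(shouldEvict(950, 1000, false)) // false: within the 100-score window
}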
--- blockdag/utxodiffstore.go | 10 +++++++--- blockdag/utxodiffstore_test.go | 7 ++++--- netsync/manager.go | 1 - 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/blockdag/utxodiffstore.go b/blockdag/utxodiffstore.go index dc910b9b6..ee1652ebf 100644 --- a/blockdag/utxodiffstore.go +++ b/blockdag/utxodiffstore.go @@ -153,10 +153,12 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() { // maxBlueScoreDifferenceToKeepLoaded is the maximum difference // between the virtual's blueScore and a blockNode's blueScore // under which to keep diff data loaded in memory. -var maxBlueScoreDifferenceToKeepLoaded uint64 = 1500 +var maxBlueScoreDifferenceToKeepLoaded uint64 = 100 // clearOldEntries removes entries whose blue score is lower than -// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. +// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note +// that tips are not removed either even if their blue score is +// lower than the above. func (diffStore *utxoDiffStore) clearOldEntries() { virtualBlueScore := diffStore.dag.VirtualBlueScore() minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded @@ -164,9 +166,11 @@ func (diffStore *utxoDiffStore) clearOldEntries() { minBlueScore = 0 } + tips := diffStore.dag.virtual.tips() + toRemove := make(map[*blockNode]struct{}) for node := range diffStore.loaded { - if node.blueScore < minBlueScore { + if node.blueScore < minBlueScore && !tips.contains(node) { toRemove[node] = struct{}{} } } diff --git a/blockdag/utxodiffstore_test.go b/blockdag/utxodiffstore_test.go index d7c6d89d7..b71bbf3e4 100644 --- a/blockdag/utxodiffstore_test.go +++ b/blockdag/utxodiffstore_test.go @@ -149,10 +149,11 @@ func TestClearOldEntries(t *testing.T) { t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash()) } - // Make sure that the child-of-genesis node isn't in the loaded set + // Make sure that the child-of-genesis node is in the loaded set, since it + // is a tip. _, ok := dag.utxoDiffStore.loaded[node] - if ok { - t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash) + if !ok { + t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash) } // Make sure that all the old nodes still do not exist in the loaded set diff --git a/netsync/manager.go b/netsync/manager.go index df11785b1..0ab7c4e50 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -852,7 +852,6 @@ func (sm *SyncManager) sendInvsFromRequestQueue(peer *peerpkg.Peer, state *peerS return err } if !sm.isSyncing || sm.isSynced() { - log.Criticalf("wtf? 
sm.isSyncing: %t sm.isSynced: %t", sm.isSyncing, sm.isSynced()) err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) if err != nil { return err From b6a6e577c4a23ea43be6e35c0f236a8fbac10044 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Tue, 9 Jun 2020 12:12:18 +0300 Subject: [PATCH 52/77] [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier (#749) * [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier * [NOD-1013] Add comment --- netsync/manager.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index 0ab7c4e50..2a3227c98 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -1029,17 +1029,22 @@ func (sm *SyncManager) handleBlockDAGNotification(notification *blockdag.Notific } }) - // Relay if we are current and the block was not just now unorphaned. - // Otherwise peers that are current should already know about it - if sm.isSynced() && !data.WasUnorphaned { - iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) - sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) - } + // sm.peerNotifier sends messages to the rebroadcastHandler, so we call + // it in its own goroutine so it won't block dag.ProcessBlock in case + // rebroadcastHandler channel is full. + spawn(func() { + // Relay if we are current and the block was not just now unorphaned. + // Otherwise peers that are current should already know about it + if sm.isSynced() && !data.WasUnorphaned { + iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) + sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) + } - for msg := range ch { - sm.peerNotifier.TransactionConfirmed(msg.Tx) - sm.peerNotifier.AnnounceNewTransactions(msg.AcceptedTxs) - } + for msg := range ch { + sm.peerNotifier.TransactionConfirmed(msg.Tx) + sm.peerNotifier.AnnounceNewTransactions(msg.AcceptedTxs) + } + }) } } From 20da1b9c9ac79ec84b260647f6b3de57e0412aff Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Wed, 10 Jun 2020 16:05:02 +0300 Subject: [PATCH 53/77] [NOD-1048] Make leveldb compaction much less frequent (#756) * [NOD-1048] Make leveldb compaction much less frequent. Also, allocate an entire gigabyte for leveldb's blockCache and writeBuffer. * [NOD-1048] Implement changing the options for testing purposes. * [NOD-1048] Rename originalOptions to originalLDBOptions. * [NOD-1048] Add a comment. 
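
Because ldb.Options is a package-level function variable rather than a
constant, callers can swap the options at runtime. A minimal sketch of the
override pattern this enables (withTestLDBOptions is a hypothetical wrapper;
the patch itself inlines the same steps in blockdag.DAGSetup):

package main

import (
	"github.com/kaspanet/kaspad/database/ffldb/ldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// withTestLDBOptions runs fn with lightweight leveldb options and restores
// the originals afterwards. Returning nil makes leveldb fall back to its
// small built-in defaults, keeping repeated open/close cycles cheap in tests.
func withTestLDBOptions(fn func()) {
	originalLDBOptions := ldb.Options
	ldb.Options = func() *opt.Options { return nil }
	defer func() { ldb.Options = originalLDBOptions }()
	fn()
}

func main() {
	withTestLDBOptions(func() {
		// open and close test databases here with the lightweight options
	})
}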
--- blockdag/test_utils.go | 12 ++++++++++++ database/ffldb/ldb/leveldb.go | 2 +- database/ffldb/ldb/options.go | 19 +++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 database/ffldb/ldb/options.go diff --git a/blockdag/test_utils.go b/blockdag/test_utils.go index 012194d97..d9e62730b 100644 --- a/blockdag/test_utils.go +++ b/blockdag/test_utils.go @@ -5,9 +5,11 @@ package blockdag import ( "compress/bzip2" "encoding/binary" + "github.com/kaspanet/kaspad/database/ffldb/ldb" "github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/util" "github.com/pkg/errors" + "github.com/syndtr/goleveldb/leveldb/opt" "io" "io/ioutil" "os" @@ -62,6 +64,15 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err return nil, nil, errors.Errorf("error creating temp dir: %s", err) } + // We set ldb.Options here to return nil because normally + // the database is initialized with very large caches that + // can make opening/closing the database for every test + // quite heavy. + originalLDBOptions := ldb.Options + ldb.Options = func() *opt.Options { + return nil + } + dbPath := filepath.Join(tmpDir, dbName) _ = os.RemoveAll(dbPath) err = dbaccess.Open(dbPath) @@ -75,6 +86,7 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err spawnWaitGroup.Wait() spawn = realSpawn dbaccess.Close() + ldb.Options = originalLDBOptions os.RemoveAll(dbPath) } } else { diff --git a/database/ffldb/ldb/leveldb.go b/database/ffldb/ldb/leveldb.go index 0b3f08e51..27fab5600 100644 --- a/database/ffldb/ldb/leveldb.go +++ b/database/ffldb/ldb/leveldb.go @@ -15,7 +15,7 @@ type LevelDB struct { // NewLevelDB opens a leveldb instance defined by the given path. func NewLevelDB(path string) (*LevelDB, error) { // Open leveldb. If it doesn't exist, create it. - ldb, err := leveldb.OpenFile(path, nil) + ldb, err := leveldb.OpenFile(path, Options()) // If the database is corrupted, attempt to recover. if _, corrupted := err.(*ldbErrors.ErrCorrupted); corrupted { diff --git a/database/ffldb/ldb/options.go b/database/ffldb/ldb/options.go new file mode 100644 index 000000000..604f0e417 --- /dev/null +++ b/database/ffldb/ldb/options.go @@ -0,0 +1,19 @@ +package ldb + +import "github.com/syndtr/goleveldb/leveldb/opt" + +var ( + defaultOptions = opt.Options{ + Compression: opt.NoCompression, + BlockCacheCapacity: 512 * opt.MiB, + WriteBuffer: 512 * opt.MiB, + IteratorSamplingRate: 512 * opt.MiB, + } + + // Options is a function that returns a leveldb + // opt.Options struct for opening a database. + // It's defined as a variable for the sake of testing. + Options = func() *opt.Options { + return &defaultOptions + } +) From 8bbced5925091e7a59252666b40b59c68a80e93f Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 10 Jun 2020 16:05:48 +0300 Subject: [PATCH 54/77] [NOD-1051] Don't disconnect from sync peer if it sends an orphan (#757) --- netsync/manager.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index 0ab7c4e50..253fe9d55 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -533,15 +533,6 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { } if isOrphan { - // If we received an orphan block from the sync peer, it is - // misbehaving and must be disconnected from. - if peer == sm.syncPeer { - log.Errorf("Received an orphan block %s from sync peer %s. 
Disconnecting...", - blockHash, peer) - peer.Disconnect() - return - } - // Request the parents for the orphan block from the peer that sent it. missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(blockHash) if err != nil { From d6d34238d207e79239c047289039a8b0d1dde4e4 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 10 Jun 2020 16:13:13 +0300 Subject: [PATCH 55/77] [NOD-1049] Allow empty addr messages (#753) --- server/p2p/on_addr.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/server/p2p/on_addr.go b/server/p2p/on_addr.go index ce663a203..e80e5cf12 100644 --- a/server/p2p/on_addr.go +++ b/server/p2p/on_addr.go @@ -18,14 +18,6 @@ func (sp *Peer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) { return } - // A message that has no addresses is invalid. - if len(msg.AddrList) == 0 { - peerLog.Errorf("Command [%s] from %s does not contain any addresses", - msg.Command(), sp.Peer) - sp.Disconnect() - return - } - if msg.IncludeAllSubnetworks { peerLog.Errorf("Got unexpected IncludeAllSubnetworks=true in [%s] command from %s", msg.Command(), sp.Peer) From 3e5a840c5af05426b8b96911cba0c8bebf14f9ee Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Thu, 11 Jun 2020 11:56:25 +0300 Subject: [PATCH 56/77] [NOD-1052] Add a lock around clearOldEntries to protect against concurrent access of utxoDiffStore.loaded. (#758) --- blockdag/utxodiffstore.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/blockdag/utxodiffstore.go b/blockdag/utxodiffstore.go index ee1652ebf..46899f5e1 100644 --- a/blockdag/utxodiffstore.go +++ b/blockdag/utxodiffstore.go @@ -160,6 +160,9 @@ var maxBlueScoreDifferenceToKeepLoaded uint64 = 100 // that tips are not removed either even if their blue score is // lower than the above. 
func (diffStore *utxoDiffStore) clearOldEntries() { + diffStore.mtx.HighPriorityWriteLock() + defer diffStore.mtx.HighPriorityWriteUnlock() + virtualBlueScore := diffStore.dag.VirtualBlueScore() minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore { From b0d4a92e47d85ddb2d7ec47e6e2e8c24003f1ccc Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 11 Jun 2020 12:19:49 +0300 Subject: [PATCH 57/77] [NOD-1046] Delete redundant conversion from rule error (#755) --- blockdag/dag.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index 5d0796d31..2b24d5ef4 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -577,10 +577,6 @@ func (dag *BlockDAG) connectBlock(node *blockNode, newBlockPastUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd) if err != nil { - var ruleErr RuleError - if ok := errors.As(err, &ruleErr); ok { - return nil, ruleError(ruleErr.ErrorCode, fmt.Sprintf("error verifying UTXO for %s: %s", node, err)) - } return nil, errors.Wrapf(err, "error verifying UTXO for %s", node) } From ba4a89488ee6548e5bff037bc9894684cc0491fa Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 11 Jun 2020 15:54:11 +0300 Subject: [PATCH 58/77] [NOD-530] Remove coinbase inputs and add blue score to payload (#752) * [NOD-530] Remove coinbase inputs and add blue score to payload * [NOD-530] Fix comment * [NOD-530] Change util.Block private fields comments --- blockdag/accept.go | 2 - blockdag/blockheap_test.go | 4 +- blockdag/coinbase.go | 93 +++--------- blockdag/dag.go | 22 ++- blockdag/dag_test.go | 4 +- blockdag/ghostdag_test.go | 4 +- blockdag/scriptval.go | 9 -- blockdag/testdata/blk_0_to_4.dat | Bin 2055 -> 1856 bytes blockdag/testdata/blk_3A.dat | Bin 467 -> 430 bytes blockdag/testdata/blk_3B.dat | Bin 354 -> 317 bytes blockdag/testdata/blk_3C.dat | Bin 382 -> 382 bytes blockdag/testdata/blk_3D.dat | Bin 508 -> 508 bytes blockdag/utxoset.go | 42 +++--- blockdag/utxoset_test.go | 6 +- dagconfig/genesis.go | 179 ++++++++---------------- dagconfig/genesis_test.go | 129 ++++++++--------- mempool/mempool_test.go | 49 +++---- server/rpc/rpcwebsocket.go | 8 +- util/block.go | 40 ++++-- util/coinbasepayload/coinbasepayload.go | 66 +++++++++ 20 files changed, 293 insertions(+), 364 deletions(-) create mode 100644 util/coinbasepayload/coinbasepayload.go diff --git a/blockdag/accept.go b/blockdag/accept.go index 9a81c0311..a78cc4f9b 100644 --- a/blockdag/accept.go +++ b/blockdag/accept.go @@ -105,8 +105,6 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er } } - block.SetBlueScore(newNode.blueScore) - // Connect the passed block to the DAG. This also handles validation of the // transaction scripts. chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags) diff --git a/blockdag/blockheap_test.go b/blockdag/blockheap_test.go index a7d383107..b85e826dc 100644 --- a/blockdag/blockheap_test.go +++ b/blockdag/blockheap_test.go @@ -11,14 +11,14 @@ import ( func TestBlockHeap(t *testing.T) { // Create a new database and DAG instance to run tests against. 
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{ - DAGParams: &dagconfig.MainnetParams, + DAGParams: &dagconfig.SimnetParams, }) if err != nil { t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err) } defer teardownFunc() - block0Header := dagconfig.MainnetParams.GenesisBlock.Header + block0Header := dagconfig.SimnetParams.GenesisBlock.Header block0, _ := dag.newBlockNode(&block0Header, newBlockSet()) block100000Header := Block100000.Header diff --git a/blockdag/coinbase.go b/blockdag/coinbase.go index a0cd6d3f4..28d35d500 100644 --- a/blockdag/coinbase.go +++ b/blockdag/coinbase.go @@ -5,15 +5,14 @@ import ( "bytes" "encoding/binary" "github.com/kaspanet/kaspad/dbaccess" - "github.com/kaspanet/kaspad/util/subnetworkid" - "github.com/pkg/errors" - "io" - "math" - "github.com/kaspanet/kaspad/util" + "github.com/kaspanet/kaspad/util/coinbasepayload" "github.com/kaspanet/kaspad/util/daghash" + "github.com/kaspanet/kaspad/util/subnetworkid" "github.com/kaspanet/kaspad/util/txsort" "github.com/kaspanet/kaspad/wire" + "github.com/pkg/errors" + "io" ) // compactFeeData is a specialized data type to store a compact list of fees @@ -98,7 +97,10 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl return nil } blockCoinbaseTx := block.CoinbaseTransaction().MsgTx() - scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx) + _, scriptPubKey, extraData, err := coinbasepayload.DeserializeCoinbasePayload(blockCoinbaseTx) + if errors.Is(err, coinbasepayload.ErrIncorrectScriptPubKeyLen) { + return ruleError(ErrBadCoinbaseTransaction, err.Error()) + } if err != nil { return err } @@ -125,16 +127,15 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD txOuts := []*wire.TxOut{} for _, blue := range node.blues { - txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData) + txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData) if err != nil { return nil, err } - txIns = append(txIns, txIn) if txOut != nil { txOuts = append(txOuts, txOut) } } - payload, err := SerializeCoinbasePayload(scriptPubKey, extraData) + payload, err := coinbasepayload.SerializeCoinbasePayload(node.blueScore, scriptPubKey, extraData) if err != nil { return nil, err } @@ -143,83 +144,33 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD return util.NewTx(sortedCoinbaseTx), nil } -// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data. -func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) { - w := &bytes.Buffer{} - err := wire.WriteVarInt(w, uint64(len(scriptPubKey))) - if err != nil { - return nil, err - } - _, err = w.Write(scriptPubKey) - if err != nil { - return nil, err - } - _, err = w.Write(extraData) - if err != nil { - return nil, err - } - return w.Bytes(), nil -} - -// DeserializeCoinbasePayload deserialize the coinbase payload to its component (scriptPubKey and extra data). 
-func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) { - r := bytes.NewReader(tx.Payload) - scriptPubKeyLen, err := wire.ReadVarInt(r) - if err != nil { - return nil, nil, err - } - scriptPubKey = make([]byte, scriptPubKeyLen) - _, err = r.Read(scriptPubKey) - if err != nil { - return nil, nil, err - } - extraData = make([]byte, r.Len()) - if r.Len() != 0 { - _, err = r.Read(extraData) - if err != nil { - return nil, nil, err - } - } - return scriptPubKey, extraData, nil -} - -// feeInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock -// If blueBlock gets no fee - returns only txIn and nil for txOut -func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode, - txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) ( - *wire.TxIn, *wire.TxOut, error) { +// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock +// If blueBlock gets no fee - returns nil for txOut +func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode, + txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*wire.TxOut, error) { blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash) if !ok { - return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash) + return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash) } blockFeeData, ok := feeData[*blueBlock.hash] if !ok { - return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash) + return nil, errors.Errorf("No feeData for block %s", blueBlock.hash) } if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() { - return nil, nil, errors.Errorf( + return nil, errors.Errorf( "length of accepted transaction data(%d) and fee data(%d) is not equal for block %s", len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash) } - txIn := &wire.TxIn{ - SignatureScript: []byte{}, - PreviousOutpoint: wire.Outpoint{ - TxID: daghash.TxID(*blueBlock.hash), - Index: math.MaxUint32, - }, - Sequence: wire.MaxTxInSequenceNum, - } - totalFees := uint64(0) feeIterator := blockFeeData.iterator() for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData { fee, err := feeIterator.next() if err != nil { - return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err) + return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err) } if txAcceptanceData.IsAccepted { totalFees += fee @@ -229,13 +180,13 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode, totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees if totalReward == 0 { - return txIn, nil, nil + return nil, nil } // the ScriptPubKey for the coinbase is parsed from the coinbase payload - scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx()) + _, scriptPubKey, _, err := coinbasepayload.DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx()) if err != nil { - return nil, nil, err + return nil, err } txOut := &wire.TxOut{ @@ -243,5 +194,5 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode, ScriptPubKey: scriptPubKey, } - return txIn, txOut, nil + return txOut, nil } diff --git a/blockdag/dag.go b/blockdag/dag.go index 2b24d5ef4..c5b58cd0c 100644 --- 
a/blockdag/dag.go +++ b/blockdag/dag.go @@ -654,22 +654,20 @@ func (node *blockNode) selectedParentMultiset(dag *BlockDAG) (*secp256k1.MultiSe } func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, blockBlueScore uint64) (*secp256k1.MultiSet, error) { - isCoinbase := tx.IsCoinBase() - if !isCoinbase { - for _, txIn := range tx.TxIn { - entry, ok := pastUTXO.Get(txIn.PreviousOutpoint) - if !ok { - return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint) - } + for _, txIn := range tx.TxIn { + entry, ok := pastUTXO.Get(txIn.PreviousOutpoint) + if !ok { + return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint) + } - var err error - ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint) - if err != nil { - return nil, err - } + var err error + ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint) + if err != nil { + return nil, err } } + isCoinbase := tx.IsCoinBase() for i, txOut := range tx.TxOut { outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i)) entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore) diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index a56d21122..022a804cc 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -207,7 +207,7 @@ func TestIsKnownBlock(t *testing.T) { {hash: dagconfig.SimnetParams.GenesisHash.String(), want: true}, // Block 3b should be present (as a second child of Block 2). - {hash: "2a697c985ab868ea95d84e6dcd7e88301296679149e73bca46eef2d0f2995944", want: true}, + {hash: "2eb8903d3eb7f977ab329649f56f4125afa532662f7afe5dba0d4a3f1b93746f", want: true}, // Block 100000 should be present (as an orphan). {hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true}, @@ -1264,7 +1264,7 @@ func TestDoubleSpends(t *testing.T) { func TestUTXOCommitment(t *testing.T) { // Create a new database and dag instance to run tests against. - params := dagconfig.DevnetParams + params := dagconfig.SimnetParams params.BlockCoinbaseMaturity = 0 dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{ DAGParams: ¶ms, diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 28260417e..1556db239 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) { }{ { k: 3, - expectedReds: []string{"F", "G", "H", "I", "O", "P"}, + expectedReds: []string{"F", "G", "H", "I", "N", "Q"}, dagData: []*testBlockData{ { parents: []string{"A"}, @@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) { id: "T", expectedScore: 13, expectedSelectedParent: "S", - expectedBlues: []string{"S", "Q", "N"}, + expectedBlues: []string{"S", "O", "P"}, }, }, }, diff --git a/blockdag/scriptval.go b/blockdag/scriptval.go index 543319c6d..83f872755 100644 --- a/blockdag/scriptval.go +++ b/blockdag/scriptval.go @@ -179,11 +179,6 @@ func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscr // ValidateTransactionScripts validates the scripts for the passed transaction // using multiple goroutines. func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error { - // Don't validate coinbase transaction scripts. - if tx.IsCoinBase() { - return nil - } - // Collect all of the transaction inputs and required information for // validation. 
txIns := tx.MsgTx().TxIn @@ -213,10 +208,6 @@ func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.T } txValItems := make([]*txValidateItem, 0, numInputs) for _, tx := range transactions { - // Skip coinbase transactions. - if tx.IsCoinBase() { - continue - } for txInIdx, txIn := range tx.MsgTx().TxIn { txVI := &txValidateItem{ txInIndex: txInIdx, diff --git a/blockdag/testdata/blk_0_to_4.dat b/blockdag/testdata/blk_0_to_4.dat index 1b4097f24c097d971264df342bd1e2d7d2fb1059..596af3b944e18f2be08dd471ac3a3556bcb53037 100644 GIT binary patch literal 1856 zcmb3Be#iDB0~iP}xCb3qF7WLX-|M3GtEoslb_9<;^NTzPq6I{_pg>YKHi=0mnmTSg&wi^>~W% z<#RvXeO~sYN=H~;eqDU!iAKQA%dhXWUSnl?8UCnyW=w=n#Ii68pWZdsW0yXMISdz_ zgcLBa5P$>>Bf}?FEszi{Kf#QIX@l{R=^c3v9T#uUY3Tjs{o8>13#a#0iG{HnCqL}; zXnAaLVDAb8m}Xp-z~uG8A()~M3cO_yK>^0At&P{XpR|Z=k$Umz-t(nJi!OQ9E6A-7 zF}A*A)wXw~SE^g<$G|IH_rqWJ=>J-?(EW$1%>K-^>gQI~R8GtKam_im)OmAy@%rLF zNn0wWrIiNUyZztyAIsPNQ&)6eH1Y-~Rfqc@c;rxWqTRAHA^KITc-Il*i~P6uNT>GA zxVNSv@?kPk;K6*&1a=%%0}oyVqUeM9mM zU?~|_;KAf!fd_ILBOy7M~5tlGcr9TaSqI6fEPAzbf{P#F?qtt`=E0;=N)=HeOkvy_jt@CTr zUd~wt%+Qd*6x_@4?17s#m zV(Y2UK8szyg4QjLp2~Z3vHO_^9IIq*PqWxQSMoyQ*ClTJ616_lO{Q12&hk>}nrhbK z#kNCcmVQtFqvJad*GVnOZtC@TYF@kS%;unyYsd3N0+Y)#)cjdz3+(!I=ZA36t3zTA z2d;NbIkc)wyj!>LHI1q&G}##YCkX}i(`<+e)p zXy2SMdESP(OKrmc{&vn^-LP!y%X^=X8n6(EEjFk;N^Cv37Op7EwoUNltQ}Dv$5zR+ XU4F$ac_6gT^UnTgr@u#TQ8Tsxtg38Q literal 2055 zcmb3Be#cgYkpTn*7&gdQ&*PnSpn;3SDS&D5tJ`l+Tv%($e1lKWrh3y>g{LiGWfTxg zqtC^GwEzEKuK+WK5o9^U0Wcvf^nW1WV_@Lc4=@V$&`&B!PSFPmfdE`JK0`V^E-TOZ z*`b|$+?aoH%BS0w#qruJu2#nwW}Hkmk9qE>BEC}OmbmN7t)6?ie@TVSEIeMf%iwI3 zcYAhXaY3SPab|8_YKbf`tfHV{#i(7~m{3{L%w=5Ip_^XERvpp4So`hr)Sbf8=RB8& zJ#qQUB+m4o>9wg>%W6GV@pC$jJ(KyDRx7ohRLRn2z9h}?RnYU}IlWE1;bCVO`&axn zKFkxXwC~x=kAkripQRj@Q}!=aquz+5M-EH}*X8Z~SObQ@^cxLii&MT!zC8)dvT2iascWPs2i3lIyFX z!uH(o+dNA@E;hT`_43)luI-FBI+9-9ITiBup}b@6pPN=uea}k@6CbMb7_%IAyQP$| z{p6)0$=Wq~n;)-?v3&k+!QuxF`WBr@PkTOO|8!vFXL`5&^r|D z@K|VR=KE!QM||?8-YQMlzifF~UakL!K>at%kwO^eP$m$Ak&w%%AHtH+k*D{c{I6?0 zbH0K@S%Hc2jr((VY`LTx!?e5eLr0k@&JbpT#vd$%!R{v%G9W!njDaMl5vW}#6iSr- zl+%lIcGc2wdC|93!CxeF+9cL2{ura6y>UzOCrI3{%yQ1Z9BJHlvLrf*9d3WXxKwf}c` ze8_hBnrrJkw<$Ev;&xZGJ7{*?Ak^S8@4cI9_A8L06y{1uloE0o^`rDKV|W73*F7Bm zKl0a#{}bJ_!`}Hy(W#bQ@2~J&6`1TEhcil(iNYj+pTw8vwwHe`y&`kYB5@1WKYt3oHI><{ zM2b>aVuwU2A(v4SJ+4Z diff --git a/blockdag/testdata/blk_3A.dat b/blockdag/testdata/blk_3A.dat index 1d4fbec3f9903e406e2377cdbdc3adbe7ea45546..c1d01b38af3e097cfe3a908357acd8fbf619d1fe 100644 GIT binary patch delta 264 zcmcc2ypFkE_WK>%WsD3UAi%hE$Hb);_nlnCPV^V6ZmlbidM0??<1Am#{os{Z*I4G& zyK$_a`nV!ua@{AlWkS*IrT5w^UP^1*L~jm#wL2v}V3h-F%-WOur{6aH5o+@|^<%d& zd!hHFqpUyJ^$)HvopAF>q>f(T&H0<|tz4hPd?RtL#BDc?8p(u{QrWK zpnGv3r~Ln4uK=P!fC=on$(r|gvw^hMkBy`#&)-3)QqoBQUOY$d3+^KyFqemyk#`cfkcPY0RE811uDI#X`)ACgb{XqCa|%$0xq-e#r^%{p~e{SU9I+`ZIZ z#>n(-X+xn<!Ok#;xXLX$0=ga|?5kFrb}sg_*T7{s3)Ce2?8M@N z#1wt7`&WFHZnxfX&Hmj|M~0r7GeHFN9^8$^e{07lI#O?SA+Nf D0cEDa diff --git a/blockdag/testdata/blk_3B.dat b/blockdag/testdata/blk_3B.dat index 8adc090c98fe2f36afebb739e6722ef1c6f5d913..5574379aca7617068e76ac25196273970596311e 100644 GIT binary patch delta 235 zcmaFFw3n$~_WK=MQ$_|55MW%oW8zYa`%W%mC;E$3x7L+MJrlg{ah9*=e(=hyYb^8X zFP+w7?mK$1GgxZHL<#r%S5NGAUYDIDC@62m-r2Ux@Tdc8%-WOur{6aH5o+@|^<%d& zd!hHFqpUyJ^$)HvopAF>q>f(T&H0<|tz4hPd?RtL#BDc?8p(u{QrWK zpnGv3r~Ln4uK=P!fD!DviC0|J)O2_sl~_HLk7ZZj-r^|jyC7)hCv9ooV~--(e;n0M a*k`~DQa|yQv;ZrJ0R;NliNytpDf$5K^jnYs literal 354 zcmb3Be#bV7kpTn*7=Oy?#W}lbX}G-T+p6F%5;|=XYZiZuQPAGFCHWI1?pN|QUiS(w zcziVcvjWGY8jFr+slp2;%y?WWb-qMwS=XGcAFVR@oVoIk-`mWzp;@P{s{i3tmAjYP 
z%NUuyEo~?iirm)z-{J8g+vRJnt@GTb&^U|RUD58K*>QtVgUh`4ZmQX@h&~qwa>@Vy z^$HL-F)%QK7zADR9|(ZjAb^qK6RQ?ToA^qRTjH)Sw|eg7{v{PQv+#J`E`zgC-t8be zKmcY3gojGL)UbI{-+d=PC|#tzJ88anVS2dEgSI1c1uo5(*`)XUmpF diff --git a/blockdag/testdata/blk_3C.dat b/blockdag/testdata/blk_3C.dat index 94b1c83de16bb25857ffff4f419b8b6bbf291e3c..e6c99481668db8d8612fc55f727f5538c55ec8c4 100644 GIT binary patch delta 207 zcmV;=05Jdl0{#Mj9rxTlb^!nY0000G0@a0q)(hl>7PK1Z^4{mAa-z~oe;^*H6f-;A zI)=TeN~O4wr8wV0L>9=8b0xKYcUtBV*GSC{jNenKYSsjvf878K*aOVhR<#z_@Nas^ z(>JJQ5s*D=N5shlW5;G2UX4iQH+`nevr=@{$8Qu-V|Qo{CQk*M5W4c*_!Cm|4P;W@UH||90RR7gAOMlNSRnoXs$Ij-Z8QFSllCH?XiVQdwJ(r{mCHIn-)%?p Jthkdh0Zi(qVQT;Y delta 207 zcmV;=05Jdl0{#Mj9rxTlb^!nY0000G0vQVRF(9{XSKADw@uN1?is|OTinjsSh+^v8 z%2e&)ANd|GUPDDHC`9OvwIELvRhE(kY7bm8QoUZJZ;%+@c}=(i#pWT`MqspNc%ayr zkoU3%-Bza%b(FCw>>!+lWy=ZteYJE?W;|4X9hrUH||90RR7gAO(@SSRknL8;3i%)<5paOU^e_^8vV{DRqE1`TFOy6A!|~ JKJAk-0Za>kU%CJQ diff --git a/blockdag/testdata/blk_3D.dat b/blockdag/testdata/blk_3D.dat index 7ea3299be5182c66c1d04529bd6418cfd7fbcebb..4bcc0573851b103d226f27553133197f6b994a42 100644 GIT binary patch literal 508 zcmb3Be#iC;BLfHsFfQFOajC_9Cl|33{l%(V>&l~^310U&%hz*1cxBc#mU;D&?@O&6 zlHT1;ns}5=l-*M_YhO=ZNzo&N*no`UjU7R(y&g}^YnPqb98_}cc)mzra(RZDKkICP zU7zm!5Dt2ENDOQZ9wH^^UK~jJ|NnXgm@;OF<6vA=9!wNM$9{Y8s7yHWGFMY6YoF%) zGt7;_uC8BWzSud=u#feb62<`1%=q{Js@TI9a*hAhO#Y@cKf~v~?NZBtKMIo!{6 z|GZ`g$Sfwt!2eLd$nc3(3#3terN}LD*Oyy8_j3P|3Y%GYyl$7l*(mRJka}DIq>qV2 O3yDz<3?T@h$aMg2i>&(r|gvw^hMkBy`#&)-3)QqoBQUOY$d3+^_8Ue`(!> z$1;)2mGX7J7zQ1z4qNo3n8j{N(#;xy-<+N~x2Y1V1i^fk**ez#gLdr9kO zkG_L?#T%aVp08kQ@DMAa&&7eH|NpO7fVhc)ff>et=tJe9nic!)!J{(a$je+!rL29L z^Up9h2D`d`jrn5dIKw{HXG$0YNHgP#&(iJIJFeNkJL!GSJoq!?j>Vd#4dy?8J>MeC zf9QzaTacMdjDi25fRW)7s}@M3_)3vm;;t{ZdhX@^B^5Ta@Oa%WgR@cI?I88I07xGb Pi53#092i0nK#}VJP7 Date: Thu, 11 Jun 2020 16:11:22 +0300 Subject: [PATCH 59/77] [NOD-1048] Use a smaller writeBuffer and use disableSeeksCompaction directly. 
(#759) --- database/ffldb/ldb/options.go | 8 ++++---- go.mod | 2 +- go.sum | 2 ++ 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/database/ffldb/ldb/options.go b/database/ffldb/ldb/options.go index 604f0e417..92e61cfea 100644 --- a/database/ffldb/ldb/options.go +++ b/database/ffldb/ldb/options.go @@ -4,10 +4,10 @@ import "github.com/syndtr/goleveldb/leveldb/opt" var ( defaultOptions = opt.Options{ - Compression: opt.NoCompression, - BlockCacheCapacity: 512 * opt.MiB, - WriteBuffer: 512 * opt.MiB, - IteratorSamplingRate: 512 * opt.MiB, + Compression: opt.NoCompression, + BlockCacheCapacity: 256 * opt.MiB, + WriteBuffer: 128 * opt.MiB, + DisableSeeksCompaction: true, } // Options is a function that returns a leveldb diff --git a/go.mod b/go.mod index 646cfaebd..9fecff808 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/kaspanet/go-secp256k1 v0.0.2 github.com/kr/pretty v0.1.0 // indirect github.com/pkg/errors v0.9.1 - github.com/syndtr/goleveldb v1.0.0 + github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect golang.org/x/text v0.3.2 // indirect diff --git a/go.sum b/go.sum index 367c3ef64..01e6b5f50 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= From 829979b6c73517bb3f2096834dbc40a0e029782b Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 15 Jun 2020 11:07:52 +0300 Subject: [PATCH 60/77] [NOD-1007] Split checkBlockSanity subroutines (#743) * [NOD-1007] Split checkBlockSanity subroutines. * [NOD-1007] Put back the comments about performance. * [NOD-1007] Make all the functions in checkBlockSanity take a *util.Block. * [NOD-1007] Rename checkBlockTransactionsOrderedBySubnetwork to checkBlockTransactionOrder. * [NOD-1007] Move a comment up a scope level. --- blockdag/validate.go | 144 ++++++++++++++++++++++++++++++++----------- 1 file changed, 108 insertions(+), 36 deletions(-) diff --git a/blockdag/validate.go b/blockdag/validate.go index e1321e681..ff6326924 100644 --- a/blockdag/validate.go +++ b/blockdag/validate.go @@ -400,10 +400,11 @@ func CalcTxMass(tx *util.Tx, previousScriptPubKeys [][]byte) uint64 { // // The flags do not modify the behavior of this function directly, however they // are needed to pass along to checkProofOfWork. 
-func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags BehaviorFlags) (delay time.Duration, err error) { +func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFlags) (delay time.Duration, err error) { // Ensure the proof of work bits in the block header is in min/max range // and the block hash is less than the target value described by the // bits. + header := &block.MsgBlock().Header err = dag.checkProofOfWork(header, flags) if err != nil { return 0, err @@ -465,111 +466,182 @@ func checkBlockParentsOrder(header *wire.BlockHeader) error { // The flags do not modify the behavior of this function directly, however they // are needed to pass along to checkBlockHeaderSanity. func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (time.Duration, error) { - msgBlock := block.MsgBlock() - header := &msgBlock.Header - delay, err := dag.checkBlockHeaderSanity(header, flags) + delay, err := dag.checkBlockHeaderSanity(block, flags) + if err != nil { + return 0, err + } + err = dag.checkBlockContainsAtLeastOneTransaction(block) + if err != nil { + return 0, err + } + err = dag.checkBlockContainsLessThanMaxBlockMassTransactions(block) + if err != nil { + return 0, err + } + err = dag.checkFirstBlockTransactionIsCoinbase(block) + if err != nil { + return 0, err + } + err = dag.checkBlockContainsOnlyOneCoinbase(block) + if err != nil { + return 0, err + } + err = dag.checkBlockTransactionOrder(block) + if err != nil { + return 0, err + } + err = dag.checkNoNonNativeTransactions(block) + if err != nil { + return 0, err + } + err = dag.checkBlockTransactionSanity(block) + if err != nil { + return 0, err + } + err = dag.checkBlockHashMerkleRoot(block) if err != nil { return 0, err } - // A block must have at least one transaction. - numTx := len(msgBlock.Transactions) - if numTx == 0 { - return 0, ruleError(ErrNoTransactions, "block does not contain "+ - "any transactions") + // The following check will be fairly quick since the transaction IDs + // are already cached due to building the merkle tree above. + err = dag.checkBlockDuplicateTransactions(block) + if err != nil { + return 0, err } + err = dag.checkBlockDoubleSpends(block) + if err != nil { + return 0, err + } + return delay, nil +} + +func (dag *BlockDAG) checkBlockContainsAtLeastOneTransaction(block *util.Block) error { + transactions := block.Transactions() + numTx := len(transactions) + if numTx == 0 { + return ruleError(ErrNoTransactions, "block does not contain "+ + "any transactions") + } + return nil +} + +func (dag *BlockDAG) checkBlockContainsLessThanMaxBlockMassTransactions(block *util.Block) error { // A block must not have more transactions than the max block mass or // else it is certainly over the block mass limit. + transactions := block.Transactions() + numTx := len(transactions) if numTx > wire.MaxMassPerBlock { str := fmt.Sprintf("block contains too many transactions - "+ "got %d, max %d", numTx, wire.MaxMassPerBlock) - return 0, ruleError(ErrBlockMassTooHigh, str) + return ruleError(ErrBlockMassTooHigh, str) } + return nil +} - // The first transaction in a block must be a coinbase. 
+func (dag *BlockDAG) checkFirstBlockTransactionIsCoinbase(block *util.Block) error { transactions := block.Transactions() if !transactions[util.CoinbaseTransactionIndex].IsCoinBase() { - return 0, ruleError(ErrFirstTxNotCoinbase, "first transaction in "+ + return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+ "block is not a coinbase") } + return nil +} - txOffset := util.CoinbaseTransactionIndex + 1 - - // A block must not have more than one coinbase. And transactions must be - // ordered by subnetwork - for i, tx := range transactions[txOffset:] { +func (dag *BlockDAG) checkBlockContainsOnlyOneCoinbase(block *util.Block) error { + transactions := block.Transactions() + for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] { if tx.IsCoinBase() { str := fmt.Sprintf("block contains second coinbase at "+ "index %d", i+2) - return 0, ruleError(ErrMultipleCoinbases, str) - } - if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) { - return 0, ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork") + return ruleError(ErrMultipleCoinbases, str) } } + return nil +} +func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error { + transactions := block.Transactions() + for i, tx := range transactions[util.CoinbaseTransactionIndex+1:] { + if i != 0 && subnetworkid.Less(&tx.MsgTx().SubnetworkID, &transactions[i].MsgTx().SubnetworkID) { + return ruleError(ErrTransactionsNotSorted, "transactions must be sorted by subnetwork") + } + } + return nil +} + +func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error { // Disallow non-native/coinbase subnetworks in networks that don't allow them if !dag.dagParams.EnableNonNativeSubnetworks { + transactions := block.Transactions() for _, tx := range transactions { if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) || tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) { - return 0, ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed") + return ruleError(ErrInvalidSubnetwork, "non-native/coinbase subnetworks are not allowed") } } } + return nil +} - // Do some preliminary checks on each transaction to ensure they are - // sane before continuing. +func (dag *BlockDAG) checkBlockTransactionSanity(block *util.Block) error { + transactions := block.Transactions() for _, tx := range transactions { err := CheckTransactionSanity(tx, dag.subnetworkID) if err != nil { - return 0, err + return err } } + return nil +} +func (dag *BlockDAG) checkBlockHashMerkleRoot(block *util.Block) error { // Build merkle tree and ensure the calculated merkle root matches the // entry in the block header. This also has the effect of caching all // of the transaction hashes in the block to speed up future hash // checks. hashMerkleTree := BuildHashMerkleTreeStore(block.Transactions()) calculatedHashMerkleRoot := hashMerkleTree.Root() - if !header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) { + if !block.MsgBlock().Header.HashMerkleRoot.IsEqual(calculatedHashMerkleRoot) { str := fmt.Sprintf("block hash merkle root is invalid - block "+ "header indicates %s, but calculated value is %s", - header.HashMerkleRoot, calculatedHashMerkleRoot) - return 0, ruleError(ErrBadMerkleRoot, str) + block.MsgBlock().Header.HashMerkleRoot, calculatedHashMerkleRoot) + return ruleError(ErrBadMerkleRoot, str) } + return nil +} - // Check for duplicate transactions. 
This check will be fairly quick - // since the transaction IDs are already cached due to building the - // merkle tree above. +func (dag *BlockDAG) checkBlockDuplicateTransactions(block *util.Block) error { existingTxIDs := make(map[daghash.TxID]struct{}) + transactions := block.Transactions() for _, tx := range transactions { id := tx.ID() if _, exists := existingTxIDs[*id]; exists { str := fmt.Sprintf("block contains duplicate "+ "transaction %s", id) - return 0, ruleError(ErrDuplicateTx, str) + return ruleError(ErrDuplicateTx, str) } existingTxIDs[*id] = struct{}{} } + return nil +} - // Check for double spends with transactions on the same block. +func (dag *BlockDAG) checkBlockDoubleSpends(block *util.Block) error { usedOutpoints := make(map[wire.Outpoint]*daghash.TxID) + transactions := block.Transactions() for _, tx := range transactions { for _, txIn := range tx.MsgTx().TxIn { if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists { str := fmt.Sprintf("transaction %s spends "+ "outpoint %s that was already spent by "+ "transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID) - return 0, ruleError(ErrDoubleSpendInSameBlock, str) + return ruleError(ErrDoubleSpendInSameBlock, str) } usedOutpoints[txIn.PreviousOutpoint] = tx.ID() } } - - return delay, nil + return nil } // checkBlockHeaderContext performs several validation checks on the block header From d4c9fdf6ac8180fdc507500836780a740eb8b718 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 15 Jun 2020 12:12:38 +0300 Subject: [PATCH 61/77] [NOD-614] Add ban score (#760) * [NOD-614] Copy bitcoin-core ban score policy * [NOD-614] Add ban score to disconnects * [NOD-614] Fix wrong branch of AddBanScore * [NOD-614] Add ban score on sending too many addresses * [NOD-614] Add comments * [NOD-614] Remove redundant reject messages * [NOD-614] Fix log message * [NOD-614] Ban every node that sends invalid invs * [NOD-614] Make constants for ban scores --- addrmgr/addrmanager.go | 8 ++--- blockdag/dag.go | 2 +- netsync/manager.go | 57 ++++++++++++++++-------------- peer/banscores.go | 34 ++++++++++++++++++ peer/peer.go | 49 ++++++++++++++++++------- server/p2p/on_addr.go | 14 ++++++-- server/p2p/on_fee_filter.go | 6 ++-- server/p2p/on_filter_add.go | 5 ++- server/p2p/on_filter_clear.go | 5 ++- server/p2p/on_get_block_invs.go | 6 ++-- server/p2p/on_get_block_locator.go | 10 +++--- server/p2p/on_inv.go | 5 ++- server/p2p/p2p.go | 21 ++++++----- wire/msgreject.go | 2 ++ 14 files changed, 148 insertions(+), 76 deletions(-) create mode 100644 peer/banscores.go diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go index 816fe36fd..1f15cf79e 100644 --- a/addrmgr/addrmanager.go +++ b/addrmgr/addrmanager.go @@ -162,10 +162,10 @@ const ( // to a getAddr. If we have less than this amount, we send everything. getAddrMin = 50 - // getAddrMax is the most addresses that we will send in response + // GetAddrMax is the most addresses that we will send in response // to a getAddr (in practise the most addresses we will return from a // call to AddressCache()). - getAddrMax = 2500 + GetAddrMax = 2500 // getAddrPercent is the percentage of total addresses known that we // will share with a call to AddressCache. 
@@ -839,8 +839,8 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub } numAddresses := len(allAddr) * getAddrPercent / 100 - if numAddresses > getAddrMax { - numAddresses = getAddrMax + if numAddresses > GetAddrMax { + numAddresses = GetAddrMax } if len(allAddr) < getAddrMin { numAddresses = len(allAddr) diff --git a/blockdag/dag.go b/blockdag/dag.go index c5b58cd0c..4493c5ae4 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -165,7 +165,7 @@ type BlockDAG struct { // // This function is safe for concurrent access. func (dag *BlockDAG) IsKnownBlock(hash *daghash.Hash) bool { - return dag.IsInDAG(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash) + return dag.IsInDAG(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash) || dag.IsKnownInvalid(hash) } // AreKnownBlocks returns whether or not the DAG instances has all blocks represented diff --git a/netsync/manager.go b/netsync/manager.go index e0c9be792..977f8cfb4 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -376,9 +376,8 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { // If we didn't ask for this transaction then the peer is misbehaving. txID := tmsg.tx.ID() if _, exists = state.requestedTxns[*txID]; !exists { - log.Warnf("Got unrequested transaction %s from %s -- "+ - "disconnecting", txID, peer.Addr()) - peer.Disconnect() + peer.AddBanScoreAndPushRejectMsg(wire.CmdTx, wire.RejectNotRequested, (*daghash.Hash)(txID), + peerpkg.BanScoreUnrequestedTx, 0, fmt.Sprintf("got unrequested transaction %s", txID)) return } @@ -412,19 +411,25 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { // When the error is a rule error, it means the transaction was // simply rejected as opposed to something actually going wrong, // so log it as such. Otherwise, something really did go wrong, - // so log it as an actual error. - if errors.As(err, &mempool.RuleError{}) { - log.Debugf("Rejected transaction %s from %s: %s", - txID, peer, err) - } else { - log.Errorf("Failed to process transaction %s: %s", - txID, err) + // so panic. + ruleErr := &mempool.RuleError{} + if !errors.As(err, ruleErr) { + panic(errors.Wrapf(err, "failed to process transaction %s", txID)) } - // Convert the error into an appropriate reject message and - // send it. - code, reason := mempool.ErrToRejectErr(err) - peer.PushRejectMsg(wire.CmdTx, code, reason, (*daghash.Hash)(txID), false) + shouldIncreaseBanScore := false + if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) { + if txRuleErr.RejectCode == wire.RejectInvalid { + shouldIncreaseBanScore = true + } + } else if dagRuleErr := (&blockdag.RuleError{}); errors.As(ruleErr.Err, dagRuleErr) { + shouldIncreaseBanScore = true + } + + if shouldIncreaseBanScore { + peer.AddBanScoreAndPushRejectMsg(wire.CmdTx, wire.RejectInvalid, (*daghash.Hash)(txID), + peerpkg.BanScoreInvalidTx, 0, fmt.Sprintf("rejected transaction %s: %s", txID, err)) + } return } @@ -480,9 +485,8 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // mode in this case so the DAG code is actually fed the // duplicate blocks. 
if sm.dagParams != &dagconfig.RegressionNetParams { - log.Warnf("Got unrequested block %s from %s -- "+ - "disconnecting", blockHash, peer.Addr()) - peer.Disconnect() + peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectNotRequested, blockHash, + peerpkg.BanScoreUnrequestedBlock, 0, fmt.Sprintf("got unrequested block %s", blockHash)) return } } @@ -518,13 +522,8 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { log.Infof("Rejected block %s from %s: %s", blockHash, peer, err) - // Convert the error into an appropriate reject message and - // send it. - code, reason := mempool.ErrToRejectErr(err) - peer.PushRejectMsg(wire.CmdBlock, code, reason, blockHash, false) - - // Disconnect from the misbehaving peer. - peer.Disconnect() + peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectInvalid, blockHash, + peerpkg.BanScoreInvalidBlock, 0, fmt.Sprintf("got invalid block: %s", err)) return } @@ -718,6 +717,10 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { } if iv.IsBlockOrSyncBlock() { + if sm.dag.IsKnownInvalid(iv.Hash) { + peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, peerpkg.BanScoreInvalidInvBlock, 0, fmt.Sprintf("sent inv of invalid block %s", iv.Hash)) + return + } // The block is an orphan block that we already have. // When the existing orphan was processed, it requested // the missing parent blocks. When this scenario @@ -913,9 +916,9 @@ func (sm *SyncManager) handleSelectedTipMsg(msg *selectedTipMsg) { selectedTipHash := msg.selectedTipHash state := sm.peerStates[peer] if !state.peerShouldSendSelectedTip { - log.Warnf("Got unrequested selected tip message from %s -- "+ - "disconnecting", peer.Addr()) - peer.Disconnect() + peer.AddBanScoreAndPushRejectMsg(wire.CmdSelectedTip, wire.RejectNotRequested, nil, + peerpkg.BanScoreUnrequestedSelectedTip, 0, "got unrequested selected tip message") + return } state.peerShouldSendSelectedTip = false if selectedTipHash.IsEqual(peer.SelectedTipHash()) { diff --git a/peer/banscores.go b/peer/banscores.go new file mode 100644 index 000000000..d5a7dc1d8 --- /dev/null +++ b/peer/banscores.go @@ -0,0 +1,34 @@ +package peer + +// Ban scores for misbehaving nodes +const ( + BanScoreUnrequestedBlock = 100 + BanScoreInvalidBlock = 100 + BanScoreInvalidInvBlock = 100 + + BanScoreUnrequestedSelectedTip = 20 + BanScoreUnrequestedTx = 20 + BanScoreInvalidTx = 100 + + BanScoreMalformedMessage = 10 + + BanScoreNonVersionFirstMessage = 1 + BanScoreDuplicateVersion = 1 + BanScoreDuplicateVerack = 1 + + BanScoreSentTooManyAddresses = 20 + BanScoreMsgAddrWithInvalidSubnetwork = 10 + + BanScoreInvalidFeeFilter = 100 + BanScoreNoFilterLoaded = 5 + + BanScoreInvalidMsgGetBlockInvs = 10 + + BanScoreInvalidMsgBlockLocator = 100 + + BanScoreSentTxToBlocksOnly = 20 + + BanScoreNodeBloomFlagViolation = 100 + + BanScoreStallTimeout = 1 +) diff --git a/peer/peer.go b/peer/peer.go index d5288f6ae..90c8a34ec 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -199,6 +199,13 @@ type Config struct { // the DAG. IsInDAG func(*daghash.Hash) bool + // AddBanScore increases the persistent and decaying ban score fields by the + // values passed as parameters. If the resulting score exceeds half of the ban + // threshold, a warning is logged including the reason provided. Further, if + // the score is above the ban threshold, the peer will be banned and + // disconnected. + AddBanScore func(persistent, transient uint32, reason string) + // HostToNetAddress returns the netaddress for the given host. 
This can be // nil in which case the host will be parsed as an IP address. HostToNetAddress HostToNetAddrFunc @@ -646,6 +653,22 @@ func (p *Peer) IsSelectedTipKnown() bool { return !p.cfg.IsInDAG(p.selectedTipHash) } +// AddBanScore increases the persistent and decaying ban score fields by the +// values passed as parameters. If the resulting score exceeds half of the ban +// threshold, a warning is logged including the reason provided. Further, if +// the score is above the ban threshold, the peer will be banned and +// disconnected. +func (p *Peer) AddBanScore(persistent, transient uint32, reason string) { + p.cfg.AddBanScore(persistent, transient, reason) +} + +// AddBanScoreAndPushRejectMsg increases ban score and sends a +// reject message to the misbehaving peer. +func (p *Peer) AddBanScoreAndPushRejectMsg(command string, code wire.RejectCode, hash *daghash.Hash, persistent, transient uint32, reason string) { + p.PushRejectMsg(command, code, reason, hash, true) + p.cfg.AddBanScore(persistent, transient, reason) +} + // LastSend returns the last send time of the peer. // // This function is safe for concurrent access. @@ -1239,9 +1262,7 @@ out: continue } - log.Debugf("Peer %s appears to be stalled or "+ - "misbehaving, %s timeout -- "+ - "disconnecting", p, command) + p.AddBanScore(BanScoreStallTimeout, 0, fmt.Sprintf("got timeout for command %s", command)) p.Disconnect() break } @@ -1316,15 +1337,15 @@ out: log.Errorf(errMsg) } - // Push a reject message for the malformed message and wait for - // the message to be sent before disconnecting. + // Add ban score, push a reject message for the malformed message + // and wait for the message to be sent before disconnecting. // // NOTE: Ideally this would include the command in the header if // at least that much of the message was valid, but that is not // currently exposed by wire, so just used malformed for the // command. - p.PushRejectMsg("malformed", wire.RejectMalformed, errMsg, nil, - true) + p.AddBanScoreAndPushRejectMsg("malformed", wire.RejectMalformed, nil, + BanScoreMalformedMessage, 0, errMsg) } break out } @@ -1336,18 +1357,18 @@ out: switch msg := rmsg.(type) { case *wire.MsgVersion: - p.PushRejectMsg(msg.Command(), wire.RejectDuplicate, - "duplicate version message", nil, true) - break out + reason := "duplicate version message" + p.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectDuplicate, nil, + BanScoreDuplicateVersion, 0, reason) case *wire.MsgVerAck: // No read lock is necessary because verAckReceived is not written // to in any other goroutine. 
 		if p.verAckReceived {
-			log.Infof("Already received 'verack' from peer %s -- "+
-				"disconnecting", p)
-			break out
+			p.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectDuplicate, nil,
+				BanScoreDuplicateVerack, 0, "verack sent twice")
+			log.Warnf("Already received 'verack' from peer %s", p)
 		}
 		p.markVerAckReceived()
 		if p.cfg.Listeners.OnVerAck != nil {
@@ -1867,6 +1888,8 @@ func (p *Peer) readRemoteVersionMsg() error {
 		errStr := "A version message must precede all others"
 		log.Errorf(errStr)
 
+		p.AddBanScore(BanScoreNonVersionFirstMessage, 0, errStr)
+
 		rejectMsg := wire.NewMsgReject(msg.Command(), wire.RejectMalformed,
 			errStr)
 		return p.writeMessage(rejectMsg)
diff --git a/server/p2p/on_addr.go b/server/p2p/on_addr.go
index e80e5cf12..a75e38de9 100644
--- a/server/p2p/on_addr.go
+++ b/server/p2p/on_addr.go
@@ -1,6 +1,8 @@
 package p2p
 
 import (
+	"fmt"
+	"github.com/kaspanet/kaspad/addrmgr"
 	"github.com/kaspanet/kaspad/config"
 	"github.com/kaspanet/kaspad/peer"
 	"github.com/kaspanet/kaspad/wire"
@@ -18,10 +20,16 @@ func (sp *Peer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) {
 		return
 	}
 
+	if len(msg.AddrList) > addrmgr.GetAddrMax {
+		sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil,
+			peer.BanScoreSentTooManyAddresses, 0, fmt.Sprintf("address count exceeded %d", addrmgr.GetAddrMax))
+		return
+	}
+
 	if msg.IncludeAllSubnetworks {
-		peerLog.Errorf("Got unexpected IncludeAllSubnetworks=true in [%s] command from %s",
-			msg.Command(), sp.Peer)
-		sp.Disconnect()
+		sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil,
+			peer.BanScoreMsgAddrWithInvalidSubnetwork, 0,
+			fmt.Sprintf("got unexpected IncludeAllSubnetworks=true in [%s] command", msg.Command()))
 		return
 	} else if !msg.SubnetworkID.IsEqual(config.ActiveConfig().SubnetworkID) && msg.SubnetworkID != nil {
 		peerLog.Errorf("Only full nodes and %s subnetwork IDs are allowed in [%s] command, but got subnetwork ID %s from %s",
diff --git a/server/p2p/on_fee_filter.go b/server/p2p/on_fee_filter.go
index 5b015bd28..0749d6034 100644
--- a/server/p2p/on_fee_filter.go
+++ b/server/p2p/on_fee_filter.go
@@ -1,6 +1,7 @@
 package p2p
 
 import (
+	"fmt"
 	"github.com/kaspanet/kaspad/peer"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/kaspanet/kaspad/wire"
@@ -14,9 +15,8 @@ import (
 
 func (sp *Peer) OnFeeFilter(_ *peer.Peer, msg *wire.MsgFeeFilter) {
 	// Check that the passed minimum fee is a valid amount.
if msg.MinFee < 0 || msg.MinFee > util.MaxSompi { - peerLog.Debugf("Peer %s sent an invalid feefilter '%s' -- "+ - "disconnecting", sp, util.Amount(msg.MinFee)) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, + peer.BanScoreInvalidFeeFilter, 0, fmt.Sprintf("sent an invalid feefilter '%s'", util.Amount(msg.MinFee))) return } diff --git a/server/p2p/on_filter_add.go b/server/p2p/on_filter_add.go index 132962990..dd8930246 100644 --- a/server/p2p/on_filter_add.go +++ b/server/p2p/on_filter_add.go @@ -17,9 +17,8 @@ func (sp *Peer) OnFilterAdd(_ *peer.Peer, msg *wire.MsgFilterAdd) { } if sp.filter.IsLoaded() { - peerLog.Debugf("%s sent a filteradd request with no filter "+ - "loaded -- disconnecting", sp) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(wire.CmdFilterAdd, wire.RejectInvalid, nil, + peer.BanScoreNoFilterLoaded, 0, "sent a filteradd request with no filter loaded") return } diff --git a/server/p2p/on_filter_clear.go b/server/p2p/on_filter_clear.go index ba3f2e836..bb80ce557 100644 --- a/server/p2p/on_filter_clear.go +++ b/server/p2p/on_filter_clear.go @@ -17,9 +17,8 @@ func (sp *Peer) OnFilterClear(_ *peer.Peer, msg *wire.MsgFilterClear) { } if !sp.filter.IsLoaded() { - peerLog.Debugf("%s sent a filterclear request with no "+ - "filter loaded -- disconnecting", sp) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(wire.CmdFilterClear, wire.RejectInvalid, nil, + peer.BanScoreNoFilterLoaded, 0, "sent a filterclear request with no filter loaded") return } diff --git a/server/p2p/on_get_block_invs.go b/server/p2p/on_get_block_invs.go index c120c50d6..1fdd0b7e7 100644 --- a/server/p2p/on_get_block_invs.go +++ b/server/p2p/on_get_block_invs.go @@ -1,6 +1,7 @@ package p2p import ( + "fmt" "github.com/kaspanet/kaspad/peer" "github.com/kaspanet/kaspad/wire" ) @@ -23,8 +24,9 @@ func (sp *Peer) OnGetBlockInvs(_ *peer.Peer, msg *wire.MsgGetBlockInvs) { hashList, err := dag.AntiPastHashesBetween(msg.LowHash, msg.HighHash, wire.MaxInvPerMsg) if err != nil { - peerLog.Warnf("Error getting antiPast hashes between %s and %s: %s", msg.LowHash, msg.HighHash, err) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(wire.CmdGetBlockInvs, wire.RejectInvalid, nil, + peer.BanScoreInvalidMsgGetBlockInvs, 0, + fmt.Sprintf("error getting antiPast hashes between %s and %s: %s", msg.LowHash, msg.HighHash, err)) return } diff --git a/server/p2p/on_get_block_locator.go b/server/p2p/on_get_block_locator.go index 52d1f2d66..e88cd73f4 100644 --- a/server/p2p/on_get_block_locator.go +++ b/server/p2p/on_get_block_locator.go @@ -11,13 +11,13 @@ import ( func (sp *Peer) OnGetBlockLocator(_ *peer.Peer, msg *wire.MsgGetBlockLocator) { locator, err := sp.server.DAG.BlockLocatorFromHashes(msg.HighHash, msg.LowHash) if err != nil || len(locator) == 0 { - warning := fmt.Sprintf("Couldn't build a block locator between blocks "+ - "%s and %s that was requested from peer %s", msg.HighHash, msg.LowHash, sp) if err != nil { - warning = fmt.Sprintf("%s: %s", warning, err) + peerLog.Warnf("Couldn't build a block locator between blocks "+ + "%s and %s that was requested from peer %s: %s", msg.HighHash, msg.LowHash, sp, err) } - peerLog.Warnf(warning) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectInvalid, nil, + peer.BanScoreInvalidMsgBlockLocator, 0, + fmt.Sprintf("couldn't build a block locator between blocks %s and %s", msg.HighHash, msg.LowHash)) return } diff --git a/server/p2p/on_inv.go b/server/p2p/on_inv.go index 3de26fbe7..3d47e45ca 100644 --- 
a/server/p2p/on_inv.go +++ b/server/p2p/on_inv.go @@ -23,9 +23,8 @@ func (sp *Peer) OnInv(_ *peer.Peer, msg *wire.MsgInv) { if invVect.Type == wire.InvTypeTx { peerLog.Tracef("Ignoring tx %s in inv from %s -- "+ "blocksonly enabled", invVect.Hash, sp) - peerLog.Infof("Peer %s is announcing "+ - "transactions -- disconnecting", sp) - sp.Disconnect() + sp.AddBanScoreAndPushRejectMsg(msg.Command(), wire.RejectNotRequested, invVect.Hash, + peer.BanScoreSentTxToBlocksOnly, 0, "announced transactions when blocksonly is enabled") return } err := newInv.AddInvVect(invVect) diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 81431a65b..426f3b10c 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -8,6 +8,7 @@ package p2p import ( "crypto/rand" "encoding/binary" + "fmt" "math" "net" "runtime" @@ -328,13 +329,8 @@ func (sp *Peer) pushAddrMsg(addresses []*wire.NetAddress, subnetworkID *subnetwo // the score is above the ban threshold, the peer will be banned and // disconnected. func (sp *Peer) addBanScore(persistent, transient uint32, reason string) { - // No warning is logged and no score is calculated if banning is disabled. - if config.ActiveConfig().DisableBanning { - return - } if sp.isWhitelisted { peerLog.Debugf("Misbehaving whitelisted peer %s: %s", sp, reason) - return } warnThreshold := config.ActiveConfig().BanThreshold >> 1 @@ -348,16 +344,22 @@ func (sp *Peer) addBanScore(persistent, transient uint32, reason string) { } return } + score := sp.DynamicBanScore.Increase(persistent, transient) + logMsg := fmt.Sprintf("Misbehaving peer %s: %s -- ban score increased to %d", + sp, reason, score) if score > warnThreshold { - peerLog.Warnf("Misbehaving peer %s: %s -- ban score increased to %d", - sp, reason, score) - if score > config.ActiveConfig().BanThreshold { + peerLog.Warn(logMsg) + if !config.ActiveConfig().DisableBanning && !sp.isWhitelisted && score > config.ActiveConfig().BanThreshold { peerLog.Warnf("Misbehaving peer %s -- banning and disconnecting", sp) sp.server.BanPeer(sp) sp.Disconnect() } + } else if persistent != 0 { + peerLog.Warn(logMsg) + } else { + peerLog.Trace(logMsg) } } @@ -375,7 +377,7 @@ func (sp *Peer) enforceNodeBloomFlag(cmd string) bool { // Disconnect the peer regardless of whether it was // banned. - sp.addBanScore(100, 0, cmd) + sp.addBanScore(peer.BanScoreNodeBloomFlagViolation, 0, cmd) sp.Disconnect() return false } @@ -937,6 +939,7 @@ func newPeerConfig(sp *Peer) *peer.Config { }, SelectedTipHash: sp.selectedTipHash, IsInDAG: sp.blockExists, + AddBanScore: sp.addBanScore, HostToNetAddress: sp.server.addrManager.HostToNetAddress, Proxy: config.ActiveConfig().Proxy, UserAgentName: userAgentName, diff --git a/wire/msgreject.go b/wire/msgreject.go index e0e090aed..267b4d746 100644 --- a/wire/msgreject.go +++ b/wire/msgreject.go @@ -21,6 +21,7 @@ const ( RejectInvalid RejectCode = 0x10 RejectObsolete RejectCode = 0x11 RejectDuplicate RejectCode = 0x12 + RejectNotRequested RejectCode = 0x13 RejectNonstandard RejectCode = 0x40 RejectDust RejectCode = 0x41 RejectInsufficientFee RejectCode = 0x42 @@ -39,6 +40,7 @@ var rejectCodeStrings = map[RejectCode]string{ RejectInsufficientFee: "REJECT_INSUFFICIENTFEE", RejectFinality: "REJECT_FINALITY", RejectDifficulty: "REJECT_DIFFICULTY", + RejectNotRequested: "REJECT_NOTREQUESTED", } // String returns the RejectCode in human-readable form. 
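The net effect of the patch above is that misbehavior is now graded instead of being punished with an immediate disconnect. What follows is a minimal, self-contained Go sketch of the decision order in the new (*Peer).addBanScore, with a plain counter standing in for the decaying DynamicBanScore and a fixed constant standing in for config.ActiveConfig().BanThreshold; the real implementation also honors DisableBanning and whitelisted peers:

package main

import "fmt"

// banThreshold stands in for config.ActiveConfig().BanThreshold.
const banThreshold uint32 = 100

// addBanScore mirrors the decision order of the patched (*Peer).addBanScore:
// the score is always increased, a warning is emitted above half the
// threshold, and the peer is banned only above the full threshold. A plain
// counter replaces DynamicBanScore, whose transient component also decays.
func addBanScore(score *uint32, persistent, transient uint32, reason string) (banned bool) {
	warnThreshold := banThreshold >> 1
	*score += persistent + transient
	if *score > warnThreshold {
		fmt.Printf("Misbehaving peer: %s -- ban score increased to %d\n", reason, *score)
		if *score > banThreshold {
			fmt.Println("Ban threshold exceeded -- banning and disconnecting")
			return true
		}
	}
	return false
}

func main() {
	var score uint32
	// Malformed messages score BanScoreMalformedMessage (10) each: warnings
	// start once the score exceeds 50, and the eleventh offense, pushing the
	// score past 100, gets the peer banned.
	for i := 0; i < 11; i++ {
		if addBanScore(&score, 10, 0, "malformed message") {
			fmt.Printf("peer banned after %d offenses\n", i+1)
		}
	}
}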
From 0744e8ebc0367d5e8c35c02880ada3a0515117b4 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 15 Jun 2020 16:08:25 +0300 Subject: [PATCH 62/77] [NOD-1042] Ignore very high orphans (#761) * [NOD-530] Remove coinbase inputs and add blue score to payload * [NOD-1042] Ignore very high orphans * [NOD-1042] Add ban score to an orphan with malformed blue score * [NOD-1042] Fix log --- netsync/manager.go | 19 +++++++++++++++++++ peer/banscores.go | 7 ++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index 977f8cfb4..55481a888 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -532,6 +532,25 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { } if isOrphan { + blueScore, err := bmsg.block.BlueScore() + if err != nil { + log.Errorf("Received an orphan block %s with malformed blue score from %s. Disconnecting...", + blockHash, peer) + peer.AddBanScoreAndPushRejectMsg(wire.CmdBlock, wire.RejectInvalid, blockHash, + peerpkg.BanScoreMalformedBlueScoreInOrphan, 0, + fmt.Sprintf("Received an orphan block %s with malformed blue score", blockHash)) + return + } + + const maxOrphanBlueScoreDiff = 10000 + selectedTipBlueScore := sm.dag.SelectedTipBlueScore() + if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff { + log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+ + "%d. Ignoring orphans with a blue score difference from the selected tip greater than %d", + blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff) + return + } + // Request the parents for the orphan block from the peer that sent it. missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(blockHash) if err != nil { diff --git a/peer/banscores.go b/peer/banscores.go index d5a7dc1d8..f4529c19a 100644 --- a/peer/banscores.go +++ b/peer/banscores.go @@ -2,9 +2,10 @@ package peer // Ban scores for misbehaving nodes const ( - BanScoreUnrequestedBlock = 100 - BanScoreInvalidBlock = 100 - BanScoreInvalidInvBlock = 100 + BanScoreUnrequestedBlock = 100 + BanScoreInvalidBlock = 100 + BanScoreInvalidInvBlock = 100 + BanScoreMalformedBlueScoreInOrphan = 100 BanScoreUnrequestedSelectedTip = 20 BanScoreUnrequestedTx = 20 From dc643c2d7685b144eae0bd1d94dae7b220cb1e3e Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Tue, 16 Jun 2020 11:01:06 +0300 Subject: [PATCH 63/77] [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate (#762) * [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate * [NOD-833] Fix tests * [NOD-833] Break long lines --- cmd/kaspaminer/config.go | 1 + cmd/kaspaminer/main.go | 12 +- cmd/kaspaminer/mineloop.go | 22 ++-- config/config.go | 22 ---- mining/mining.go | 13 +- rpcclient/mining.go | 12 +- rpcmodel/rpc_commands.go | 5 +- rpcmodel/rpc_commands_test.go | 60 ++++----- rpcmodel/rpc_results.go | 9 -- server/rpc/handle_get_block_template.go | 162 ++++-------------------- server/rpc/rpcserverhelp.go | 18 +-- 11 files changed, 99 insertions(+), 237 deletions(-) diff --git a/cmd/kaspaminer/config.go b/cmd/kaspaminer/config.go index 5f87d947e..392eb70ca 100644 --- a/cmd/kaspaminer/config.go +++ b/cmd/kaspaminer/config.go @@ -36,6 +36,7 @@ type configFlags struct { RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"` DisableTLS bool `long:"notls" description:"Disable TLS"` + MiningAddr string `long:"miningaddr" 
description:"Address to mine to"` Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"` NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."` BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."` diff --git a/cmd/kaspaminer/main.go b/cmd/kaspaminer/main.go index 32bf524df..fbd9b2fdd 100644 --- a/cmd/kaspaminer/main.go +++ b/cmd/kaspaminer/main.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/kaspanet/kaspad/util" "os" "github.com/kaspanet/kaspad/version" @@ -39,15 +40,20 @@ func main() { client, err := connectToServer(cfg) if err != nil { - panic(errors.Wrap(err, "Error connecting to the RPC server")) + panic(errors.Wrap(err, "error connecting to the RPC server")) } defer client.Disconnect() + miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix) + if err != nil { + panic(errors.Wrap(err, "error decoding mining address")) + } + doneChan := make(chan struct{}) spawn(func() { - err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced) + err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced, miningAddr) if err != nil { - panic(errors.Errorf("Error in mine loop: %s", err)) + panic(errors.Wrap(err, "error in mine loop")) } doneChan <- struct{}{} }) diff --git a/cmd/kaspaminer/mineloop.go b/cmd/kaspaminer/mineloop.go index 58a537eeb..fb4819cc6 100644 --- a/cmd/kaspaminer/mineloop.go +++ b/cmd/kaspaminer/mineloop.go @@ -25,7 +25,9 @@ var hashesTried uint64 const logHashRateInterval = 10 * time.Second -func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool) error { +func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool, + miningAddr util.Address) error { + errChan := make(chan error) templateStopChan := make(chan struct{}) @@ -35,7 +37,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, min wg := sync.WaitGroup{} for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ { foundBlock := make(chan *util.Block) - mineNextBlock(client, foundBlock, mineWhenNotSynced, templateStopChan, errChan) + mineNextBlock(client, miningAddr, foundBlock, mineWhenNotSynced, templateStopChan, errChan) block := <-foundBlock templateStopChan <- struct{}{} wg.Add(1) @@ -80,12 +82,12 @@ func logHashRate() { }) } -func mineNextBlock(client *minerClient, foundBlock chan *util.Block, mineWhenNotSynced bool, +func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *util.Block, mineWhenNotSynced bool, templateStopChan chan struct{}, errChan chan error) { newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult) spawn(func() { - templatesLoop(client, newTemplateChan, errChan, templateStopChan) + templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan) }) spawn(func() { solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan) @@ -134,7 +136,7 @@ func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{}, acceptedIDMerkleRoot, utxoCommitment, bits, 0)) - for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) 
{ + for i, txResult := range template.Transactions { reader := hex.NewDecoder(strings.NewReader(txResult.Data)) tx := &wire.MsgTx{} if err := tx.KaspaDecode(reader, 0); err != nil { @@ -169,7 +171,9 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util } -func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) { +func templatesLoop(client *minerClient, miningAddr util.Address, + newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) { + longPollID := "" getBlockTemplateLongPoll := func() { if longPollID != "" { @@ -177,7 +181,7 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT } else { log.Infof("Requesting template without longPollID from %s", client.Host()) } - template, err := getBlockTemplate(client, longPollID) + template, err := getBlockTemplate(client, miningAddr, longPollID) if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) { log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host()) return @@ -205,8 +209,8 @@ func templatesLoop(client *minerClient, newTemplateChan chan *rpcmodel.GetBlockT } } -func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { - return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID) +func getBlockTemplate(client *minerClient, miningAddr util.Address, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { + return client.GetBlockTemplate(miningAddr.String(), longPollID) } func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, diff --git a/config/config.go b/config/config.go index 548a8e161..97794a9ba 100644 --- a/config/config.go +++ b/config/config.go @@ -117,7 +117,6 @@ type Flags struct { Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"` MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in KAS/kB to be considered a non-zero fee."` MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"` - MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"` BlockMaxMass uint64 `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"` UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."` NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` @@ -607,27 +606,6 @@ func loadConfig() (*Config, []string, error) { return nil, nil, err } - // Check mining addresses are valid and saved parsed versions. 
- activeConfig.MiningAddrs = make([]util.Address, 0, len(activeConfig.Flags.MiningAddrs)) - for _, strAddr := range activeConfig.Flags.MiningAddrs { - addr, err := util.DecodeAddress(strAddr, activeConfig.NetParams().Prefix) - if err != nil { - str := "%s: mining address '%s' failed to decode: %s" - err := errors.Errorf(str, funcName, strAddr, err) - fmt.Fprintln(os.Stderr, err) - fmt.Fprintln(os.Stderr, usageMessage) - return nil, nil, err - } - if !addr.IsForPrefix(activeConfig.NetParams().Prefix) { - str := "%s: mining address '%s' is on the wrong network" - err := errors.Errorf(str, funcName, strAddr) - fmt.Fprintln(os.Stderr, err) - fmt.Fprintln(os.Stderr, usageMessage) - return nil, nil, err - } - activeConfig.MiningAddrs = append(activeConfig.MiningAddrs, addr) - } - // Add default port to all listener addresses if needed and remove // duplicate addresses. activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners, diff --git a/mining/mining.go b/mining/mining.go index e193f261c..cc8aee705 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -79,12 +79,6 @@ type BlockTemplate struct { // Height is the height at which the block template connects to the DAG Height uint64 - - // ValidPayAddress indicates whether or not the template coinbase pays - // to an address or is redeemable by anyone. See the documentation on - // NewBlockTemplate for details on which this can be useful to generate - // templates without a coinbase payment address. - ValidPayAddress bool } // BlkTmplGenerator provides a type that can be used to generate block templates @@ -212,10 +206,9 @@ func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress util.Address, extraNonc txsForBlockTemplate.totalMass, util.CompactToBig(msgBlock.Header.Bits)) return &BlockTemplate{ - Block: msgBlock, - TxMasses: txsForBlockTemplate.txMasses, - Fees: txsForBlockTemplate.txFees, - ValidPayAddress: payToAddress != nil, + Block: msgBlock, + TxMasses: txsForBlockTemplate.txMasses, + Fees: txsForBlockTemplate.txFees, }, nil } diff --git a/rpcclient/mining.go b/rpcclient/mining.go index f39ba9ef7..a00cd46af 100644 --- a/rpcclient/mining.go +++ b/rpcclient/mining.go @@ -71,11 +71,11 @@ type FutureGetBlockTemplateResult chan *response // the returned instance. 
// // See GetBlockTemplate for the blocking version and more details -func (c *Client) GetBlockTemplateAsync(capabilities []string, longPollID string) FutureGetBlockTemplateResult { +func (c *Client) GetBlockTemplateAsync(payAddress string, longPollID string) FutureGetBlockTemplateResult { request := &rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: capabilities, - LongPollID: longPollID, + Mode: "template", + LongPollID: longPollID, + PayAddress: payAddress, } cmd := rpcmodel.NewGetBlockTemplateCmd(request) return c.sendCmd(cmd) @@ -97,6 +97,6 @@ func (r FutureGetBlockTemplateResult) Receive() (*rpcmodel.GetBlockTemplateResul } // GetBlockTemplate requests a block template from the server, to mine upon -func (c *Client) GetBlockTemplate(capabilities []string, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { - return c.GetBlockTemplateAsync(capabilities, longPollID).Receive() +func (c *Client) GetBlockTemplate(payAddress string, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) { + return c.GetBlockTemplateAsync(payAddress, longPollID).Receive() } diff --git a/rpcmodel/rpc_commands.go b/rpcmodel/rpc_commands.go index f6dbbecb2..f9003fbcb 100644 --- a/rpcmodel/rpc_commands.go +++ b/rpcmodel/rpc_commands.go @@ -206,8 +206,7 @@ func NewGetBlockHeaderCmd(hash string, verbose *bool) *GetBlockHeaderCmd { // TemplateRequest is a request object as defined in BIP22. It is optionally // provided as a pointer argument to GetBlockTemplateCmd. type TemplateRequest struct { - Mode string `json:"mode,omitempty"` - Capabilities []string `json:"capabilities,omitempty"` + Mode string `json:"mode,omitempty"` // Optional long polling. LongPollID string `json:"longPollId,omitempty"` @@ -225,6 +224,8 @@ type TemplateRequest struct { // "proposal".
Data string `json:"data,omitempty"` WorkID string `json:"workId,omitempty"` + + PayAddress string `json:"payAddress"` } // convertTemplateRequestField potentially converts the provided value as diff --git a/rpcmodel/rpc_commands_test.go b/rpcmodel/rpc_commands_test.go index 2283836cc..de6694923 100644 --- a/rpcmodel/rpc_commands_test.go +++ b/rpcmodel/rpc_commands_test.go @@ -256,72 +256,72 @@ func TestRPCServerCommands(t *testing.T) { { name: "getBlockTemplate optional - template request", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"]}`) + return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) }, staticCmd: func() interface{} { template := rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longpoll", "coinbasetxn"}, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", } return rpcmodel.NewGetBlockTemplateCmd(&template) }, - marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","capabilities":["longpoll","coinbasetxn"]}],"id":1}`, + marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, unmarshalled: &rpcmodel.GetBlockTemplateCmd{ Request: &rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longpoll", "coinbasetxn"}, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", }, }, }, { name: "getBlockTemplate optional - template request with tweaks", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","capabilities":["longPoll","coinbaseTxn"],"sigOpLimit":500,"massLimit":100000000,"maxVersion":1}`) + return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","sigOpLimit":500,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}`) }, staticCmd: func() interface{} { template := rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longPoll", "coinbaseTxn"}, - SigOpLimit: 500, - MassLimit: 100000000, - MaxVersion: 1, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", + SigOpLimit: 500, + MassLimit: 100000000, + MaxVersion: 1, } return rpcmodel.NewGetBlockTemplateCmd(&template) }, - marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","capabilities":["longPoll","coinbaseTxn"],"sigOpLimit":500,"massLimit":100000000,"maxVersion":1}],"id":1}`, + marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","sigOpLimit":500,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, unmarshalled: &rpcmodel.GetBlockTemplateCmd{ Request: &rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longPoll", "coinbaseTxn"}, - SigOpLimit: int64(500), - MassLimit: int64(100000000), - MaxVersion: 1, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", + SigOpLimit: int64(500), + MassLimit: int64(100000000), + MaxVersion: 1, }, }, }, { name: "getBlockTemplate optional - template request with tweaks 2", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getBlockTemplate", 
`{"mode":"template","capabilities":["longPoll","coinbaseTxn"],"sigOpLimit":true,"massLimit":100000000,"maxVersion":1}`) + return rpcmodel.NewCommand("getBlockTemplate", `{"mode":"template","payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3","sigOpLimit":true,"massLimit":100000000,"maxVersion":1}`) }, staticCmd: func() interface{} { template := rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longPoll", "coinbaseTxn"}, - SigOpLimit: true, - MassLimit: 100000000, - MaxVersion: 1, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", + SigOpLimit: true, + MassLimit: 100000000, + MaxVersion: 1, } return rpcmodel.NewGetBlockTemplateCmd(&template) }, - marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","capabilities":["longPoll","coinbaseTxn"],"sigOpLimit":true,"massLimit":100000000,"maxVersion":1}],"id":1}`, + marshalled: `{"jsonrpc":"1.0","method":"getBlockTemplate","params":[{"mode":"template","sigOpLimit":true,"massLimit":100000000,"maxVersion":1,"payAddress":"kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"}],"id":1}`, unmarshalled: &rpcmodel.GetBlockTemplateCmd{ Request: &rpcmodel.TemplateRequest{ - Mode: "template", - Capabilities: []string{"longPoll", "coinbaseTxn"}, - SigOpLimit: true, - MassLimit: int64(100000000), - MaxVersion: 1, + Mode: "template", + PayAddress: "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3", + SigOpLimit: true, + MassLimit: int64(100000000), + MaxVersion: 1, }, }, }, diff --git a/rpcmodel/rpc_results.go b/rpcmodel/rpc_results.go index 9b3da13b6..5b3c98d79 100644 --- a/rpcmodel/rpc_results.go +++ b/rpcmodel/rpc_results.go @@ -127,12 +127,6 @@ type GetBlockTemplateResultTx struct { Fee uint64 `json:"fee"` } -// GetBlockTemplateResultAux models the coinbaseaux field of the -// getblocktemplate command. -type GetBlockTemplateResultAux struct { - Flags string `json:"flags"` -} - // GetBlockTemplateResult models the data returned from the getblocktemplate // command. type GetBlockTemplateResult struct { @@ -147,9 +141,6 @@ type GetBlockTemplateResult struct { AcceptedIDMerkleRoot string `json:"acceptedIdMerkleRoot"` UTXOCommitment string `json:"utxoCommitment"` Version int32 `json:"version"` - CoinbaseAux *GetBlockTemplateResultAux `json:"coinbaseAux,omitempty"` - CoinbaseTxn *GetBlockTemplateResultTx `json:"coinbaseTxn,omitempty"` - CoinbaseValue *uint64 `json:"coinbaseValue,omitempty"` WorkID string `json:"workId,omitempty"` IsSynced bool `json:"isSynced"` diff --git a/server/rpc/handle_get_block_template.go b/server/rpc/handle_get_block_template.go index a890ae730..e6a538bd8 100644 --- a/server/rpc/handle_get_block_template.go +++ b/server/rpc/handle_get_block_template.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/hex" "fmt" - "math/rand" "strconv" "strings" "sync" @@ -44,16 +43,6 @@ var ( "time", "transactions/add", "parentblock", "coinbase/append", } - // gbtCoinbaseAux describes additional data that miners should include - // in the coinbase signature script. It is declared here to avoid the - // overhead of creating a new object on every invocation for constant - // data. - gbtCoinbaseAux = &rpcmodel.GetBlockTemplateResultAux{ - Flags: hex.EncodeToString(builderScript(txscript. - NewScriptBuilder(). - AddData([]byte(mining.CoinbaseFlags)))), - } - // gbtCapabilities describes additional capabilities returned with a // block template generated by the getBlockTemplate RPC. 
It is // declared here to avoid the overhead of creating the slice on every @@ -72,6 +61,7 @@ type gbtWorkState struct { template *mining.BlockTemplate notifyMap map[string]map[int64]chan struct{} timeSource blockdag.TimeSource + payAddress util.Address } // newGbtWorkState returns a new instance of a gbtWorkState with all internal @@ -122,42 +112,8 @@ func handleGetBlockTemplate(s *Server, cmd interface{}, closeChan <-chan struct{ // handleGetBlockTemplateRequest is a helper for handleGetBlockTemplate which // deals with generating and returning block templates to the caller. It // handles both long poll requests as specified by BIP 0022 as well as regular -// requests. In addition, it detects the capabilities reported by the caller -// in regards to whether or not it supports creating its own coinbase (the -// coinbasetxn and coinbasevalue capabilities) and modifies the returned block -// template accordingly. +// requests. func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) { - // Extract the relevant passed capabilities and restrict the result to - // either a coinbase value or a coinbase transaction object depending on - // the request. Default to only providing a coinbase value. - useCoinbaseValue := true - if request != nil { - var hasCoinbaseValue, hasCoinbaseTxn bool - for _, capability := range request.Capabilities { - switch capability { - case "coinbasetxn": - hasCoinbaseTxn = true - case "coinbasevalue": - hasCoinbaseValue = true - } - } - - if hasCoinbaseTxn && !hasCoinbaseValue { - useCoinbaseValue = false - } - } - - // When a coinbase transaction has been requested, respond with an error - // if there are no addresses to pay the created block template to. - if !useCoinbaseValue && len(config.ActiveConfig().MiningAddrs) == 0 { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInternal.Code, - Message: "A coinbase transaction has been requested, " + - "but the server has not been configured with " + - "any payment addresses via --miningaddr", - } - } - - // Return an error if there are no peers connected since there is no // way to relay a found block or receive transactions to work on. // However, allow this state when running in the regression test or @@ -171,12 +127,16 @@ func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, } } + payAddr, err := util.DecodeAddress(request.PayAddress, s.cfg.DAGParams.Prefix) + if err != nil { + return nil, err + } + // When a long poll ID was provided, this is a long poll request by the // client to be notified when block template referenced by the ID should // be replaced with a new one. if request != nil && request.LongPollID != "" { - return handleGetBlockTemplateLongPoll(s, request.LongPollID, - useCoinbaseValue, closeChan) + return handleGetBlockTemplateLongPoll(s, request.LongPollID, payAddr, closeChan) } // Protect concurrent access when updating block templates. @@ -190,10 +150,10 @@ func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, // seconds since the last template was generated. Otherwise, the // timestamp for the existing block template is updated (and possibly // the difficulty on testnet per the consensus rules).
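// Illustrative sketch, not part of the diff: with the capabilities
// mechanism gone, a caller reaches this handler by passing a pay address
// through the reworked rpcclient API shown earlier in this patch. The
// snippet assumes an already-connected *rpcclient.Client and reuses the
// placeholder address from rpc_commands_test.go:
//
//	payAddress := "kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3"
//	template, err := client.GetBlockTemplate(payAddress, "")
//	if err != nil {
//		return errors.Wrap(err, "error requesting block template")
//	}
//	log.Infof("Got a template with %d transactions", len(template.Transactions))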
- if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil { + if err := state.updateBlockTemplate(s, payAddr); err != nil { return nil, err } - return state.blockTemplateResult(s, useCoinbaseValue) + return state.blockTemplateResult(s) } // handleGetBlockTemplateLongPoll is a helper for handleGetBlockTemplateRequest @@ -204,10 +164,10 @@ func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest, // old block template is no longer valid due to a solution already being found // and added to the block DAG, or new transactions have shown up and some time // has passed without finding a solution. -func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseValue bool, closeChan <-chan struct{}) (interface{}, error) { +func handleGetBlockTemplateLongPoll(s *Server, longPollID string, payAddr util.Address, closeChan <-chan struct{}) (interface{}, error) { state := s.gbtWorkState - result, longPollChan, err := blockTemplateOrLongPollChan(s, longPollID, useCoinbaseValue) + result, longPollChan, err := blockTemplateOrLongPollChan(s, longPollID, payAddr) if err != nil { return nil, err } @@ -231,14 +191,14 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal state.Lock() defer state.Unlock() - if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil { + if err := state.updateBlockTemplate(s, payAddr); err != nil { return nil, err } // Include whether or not it is valid to submit work against the old // block template depending on whether or not a solution has already // been found and added to the block DAG. - result, err = state.blockTemplateResult(s, useCoinbaseValue) + result, err = state.blockTemplateResult(s) if err != nil { return nil, err } @@ -250,7 +210,7 @@ func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseVal // template identified by the provided long poll ID is stale or // invalid. Otherwise, it returns a channel that will notify // when there's a more current template. -func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue bool) (*rpcmodel.GetBlockTemplateResult, chan struct{}, error) { +func blockTemplateOrLongPollChan(s *Server, longPollID string, payAddr util.Address) (*rpcmodel.GetBlockTemplateResult, chan struct{}, error) { state := s.gbtWorkState state.Lock() @@ -259,7 +219,7 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue // be manually unlocked before waiting for a notification about block // template changes. - if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil { + if err := state.updateBlockTemplate(s, payAddr); err != nil { return nil, nil, err } @@ -267,7 +227,7 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue // the caller is invalid. parentHashes, lastGenerated, err := decodeLongPollID(longPollID) if err != nil { - result, err := state.blockTemplateResult(s, useCoinbaseValue) + result, err := state.blockTemplateResult(s) if err != nil { return nil, nil, err } @@ -285,7 +245,7 @@ func blockTemplateOrLongPollChan(s *Server, longPollID string, useCoinbaseValue // Include whether or not it is valid to submit work against the // old block template depending on whether or not a solution has // already been found and added to the block DAG. 
- result, err := state.blockTemplateResult(s, useCoinbaseValue) + result, err := state.blockTemplateResult(s) if err != nil { return nil, nil, err } @@ -566,7 +526,7 @@ func (state *gbtWorkState) templateUpdateChan(tipHashes []*daghash.Hash, lastGen // addresses. // // This function MUST be called with the state locked. -func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) error { +func (state *gbtWorkState) updateBlockTemplate(s *Server, payAddr util.Address) error { generator := s.cfg.Generator lastTxUpdate := generator.TxSource().LastUpdated() if lastTxUpdate.IsZero() { @@ -583,6 +543,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) template := state.template if template == nil || state.tipHashes == nil || !daghash.AreEqual(state.tipHashes, tipHashes) || + state.payAddress.String() != payAddr.String() || (state.lastTxUpdate != lastTxUpdate && time.Now().After(state.lastGenerated.Add(time.Second* gbtRegenerateSeconds))) { @@ -592,14 +553,6 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) // again. state.tipHashes = nil - // Choose a payment address at random if the caller requests a - // full coinbase as opposed to only the pertinent details needed - // to create their own coinbase. - var payAddr util.Address - if !useCoinbaseValue { - payAddr = config.ActiveConfig().MiningAddrs[rand.Intn(len(config.ActiveConfig().MiningAddrs))] - } - // Create a new block template that has a coinbase which anyone // can redeem. This is only acceptable because the returned // block template doesn't include the coinbase, so the caller @@ -634,6 +587,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) state.lastTxUpdate = lastTxUpdate state.tipHashes = tipHashes state.minTimestamp = minTimestamp + state.payAddress = payAddr log.Debugf("Generated block template (timestamp %s, "+ "target %s, merkle root %s)", @@ -650,32 +604,6 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) // trigger a new block template to be generated. So, update the // existing block template. - // When the caller requires a full coinbase as opposed to only - // the pertinent details needed to create their own coinbase, - // add a payment address to the output of the coinbase of the - // template if it doesn't already have one. Since this requires - // mining addresses to be specified via the config, an error is - // returned if none have been specified. - if !useCoinbaseValue && !template.ValidPayAddress { - // Choose a payment address at random. - payToAddr := config.ActiveConfig().MiningAddrs[rand.Intn(len(config.ActiveConfig().MiningAddrs))] - - // Update the block coinbase output of the template to - // pay to the randomly selected payment address. - scriptPubKey, err := txscript.PayToAddrScript(payToAddr) - if err != nil { - context := "Failed to create pay-to-addr script" - return internalRPCError(err.Error(), context) - } - template.Block.Transactions[util.CoinbaseTransactionIndex].TxOut[0].ScriptPubKey = scriptPubKey - template.ValidPayAddress = true - - // Update the merkle root. - block := util.NewBlock(template.Block) - hashMerkleTree := blockdag.BuildHashMerkleTreeStore(block.Transactions()) - template.Block.Header.HashMerkleRoot = hashMerkleTree.Root() - } - // Set locals for convenience. 
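// Illustrative sketch, not part of the diff: condensing the regeneration
// condition above, a new template is now built when any of the following
// holds; otherwise only the existing template's timestamp is refreshed.
// The locals are those of updateBlockTemplate:
//
//	needsNewTemplate := template == nil ||
//		state.tipHashes == nil ||
//		!daghash.AreEqual(state.tipHashes, tipHashes) ||
//		state.payAddress.String() != payAddr.String() ||
//		(state.lastTxUpdate != lastTxUpdate &&
//			time.Now().After(state.lastGenerated.Add(time.Second*gbtRegenerateSeconds)))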
msgBlock = template.Block targetDifficulty = fmt.Sprintf("%064x", @@ -700,7 +628,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool) // and returned to the caller. // // This function MUST be called with the state locked. -func (state *gbtWorkState) blockTemplateResult(s *Server, useCoinbaseValue bool) (*rpcmodel.GetBlockTemplateResult, error) { +func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTemplateResult, error) { dag := s.cfg.DAG // Ensure the timestamps are still in valid range for the template. // This should really only ever happen if the local clock is changed @@ -731,11 +659,6 @@ func (state *gbtWorkState) blockTemplateResult(s *Server, useCoinbaseValue bool) txID := tx.TxID() txIndex[*txID] = int64(i) - // Skip the coinbase transaction. - if i == 0 { - continue - } - // Create an array of 1-based indices to transactions that come // before this one in the transactions list which this one // depends on. This is necessary since the created block must @@ -775,7 +698,7 @@ func (state *gbtWorkState) blockTemplateResult(s *Server, useCoinbaseValue bool) // Including MinTime -> time/decrement // Omitting CoinbaseTxn -> coinbase, generation targetDifficulty := fmt.Sprintf("%064x", util.CompactToBig(header.Bits)) - longPollID := encodeLongPollID(state.tipHashes, state.lastGenerated) + longPollID := encodeLongPollID(state.tipHashes, state.payAddress, state.lastGenerated) // Check whether this node is synced with the rest of the // network. There's almost never a good reason to mine on top @@ -806,48 +729,13 @@ func (state *gbtWorkState) blockTemplateResult(s *Server, useCoinbaseValue bool) IsSynced: isSynced, } - if useCoinbaseValue { - reply.CoinbaseAux = gbtCoinbaseAux - reply.CoinbaseValue = &msgBlock.Transactions[util.CoinbaseTransactionIndex].TxOut[0].Value - } else { - // Ensure the template has a valid payment address associated - // with it when a full coinbase is requested. - if !template.ValidPayAddress { - return nil, &rpcmodel.RPCError{ - Code: rpcmodel.ErrRPCInternal.Code, - Message: "A coinbase transaction has been " + - "requested, but the server has not " + - "been configured with any payment " + - "addresses via --miningaddr", - } - } - - // Serialize the transaction for conversion to hex. - tx := msgBlock.Transactions[util.CoinbaseTransactionIndex] - txBuf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize())) - if err := tx.Serialize(txBuf); err != nil { - context := "Failed to serialize transaction" - return nil, internalRPCError(err.Error(), context) - } - - resultTx := rpcmodel.GetBlockTemplateResultTx{ - Data: hex.EncodeToString(txBuf.Bytes()), - ID: tx.TxID().String(), - Depends: []int64{}, - Mass: template.TxMasses[0], - Fee: template.Fees[0], - } - - reply.CoinbaseTxn = &resultTx - } - return &reply, nil } // encodeLongPollID encodes the passed details into an ID that can be used to // uniquely identify a block template.
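// Illustrative sketch, not part of the diff: the hunk below extends the
// long poll ID so that it also pins the pay address. With placeholder
// values (each parent hash is really 64 hex characters), the encoded form
// looks roughly like:
//
//	"1a2b...e3f4-kaspa:qph364lxa0ul5h0jrvl3u7xu8erc7mu3dv7prcn7x3-1592400000"
//
// Because the address is embedded in the ID, a long poll opened for one
// address should not be satisfied by a template generated for another.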
-func encodeLongPollID(parentHashes []*daghash.Hash, lastGenerated time.Time) string { - return fmt.Sprintf("%s-%d", daghash.JoinHashesStrings(parentHashes, ""), lastGenerated.Unix()) +func encodeLongPollID(parentHashes []*daghash.Hash, miningAddress util.Address, lastGenerated time.Time) string { + return fmt.Sprintf("%s-%s-%d", daghash.JoinHashesStrings(parentHashes, ""), miningAddress, lastGenerated.Unix()) } // decodeLongPollID decodes an ID that is used to uniquely identify a block diff --git a/server/rpc/rpcserverhelp.go b/server/rpc/rpcserverhelp.go index 2514d0204..eb31d888d 100644 --- a/server/rpc/rpcserverhelp.go +++ b/server/rpc/rpcserverhelp.go @@ -283,15 +283,15 @@ var helpDescsEnUS = map[string]string{ "getBlockHeaderVerboseResult-childHashes": "The hashes of the child blocks (only if there are any)", // TemplateRequest help. - "templateRequest-mode": "This is 'template', 'proposal', or omitted", - "templateRequest-capabilities": "List of capabilities", - "templateRequest-longPollId": "The long poll ID of a job to monitor for expiration; required and valid only for long poll requests ", - "templateRequest-sigOpLimit": "Number of signature operations allowed in blocks (this parameter is ignored)", - "templateRequest-massLimit": "Max transaction mass allowed in blocks (this parameter is ignored)", - "templateRequest-maxVersion": "Highest supported block version number (this parameter is ignored)", - "templateRequest-target": "The desired target for the block template (this parameter is ignored)", - "templateRequest-data": "Hex-encoded block data (only for mode=proposal)", - "templateRequest-workId": "The server provided workid if provided in block template (not applicable)", + "templateRequest-mode": "This is 'template', 'proposal', or omitted", + "templateRequest-payAddress": "The address the coinbase pays to", + "templateRequest-longPollId": "The long poll ID of a job to monitor for expiration; required and valid only for long poll requests ", + "templateRequest-sigOpLimit": "Number of signature operations allowed in blocks (this parameter is ignored)", + "templateRequest-massLimit": "Max transaction mass allowed in blocks (this parameter is ignored)", + "templateRequest-maxVersion": "Highest supported block version number (this parameter is ignored)", + "templateRequest-target": "The desired target for the block template (this parameter is ignored)", + "templateRequest-data": "Hex-encoded block data (only for mode=proposal)", + "templateRequest-workId": "The server provided workid if provided in block template (not applicable)", // GetBlockTemplateResultTx help. 
"getBlockTemplateResultTx-data": "Hex-encoded transaction data (byte-for-byte)", From bc0227b49b9dca3e3d660251488c71e429c784f6 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Tue, 16 Jun 2020 16:51:38 +0300 Subject: [PATCH 64/77] [NOD-1059] Always call sm.restartSyncIfNeeded() when getting selectedTip message (#766) --- netsync/manager.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index 55481a888..e7477fda0 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -940,9 +940,6 @@ func (sm *SyncManager) handleSelectedTipMsg(msg *selectedTipMsg) { return } state.peerShouldSendSelectedTip = false - if selectedTipHash.IsEqual(peer.SelectedTipHash()) { - return - } peer.SetSelectedTipHash(selectedTipHash) sm.restartSyncIfNeeded() } From 1271d2f113fc12b34a156fd4cd01e689839db3e5 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 17 Jun 2020 11:29:39 +0300 Subject: [PATCH 65/77] [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767) --- netsync/manager.go | 50 +++++++++++++++++++++++++++------------ peer/banscores.go | 1 + peer/message_logging.go | 2 ++ peer/peer.go | 2 +- server/p2p/on_get_data.go | 2 ++ wire/invvect.go | 22 +++++++++-------- 6 files changed, 53 insertions(+), 26 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index e7477fda0..fee6643ed 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -298,7 +298,7 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { // Initialize the peer state isSyncCandidate := sm.isSyncCandidate(peer) requestQueues := make(map[wire.InvType]*requestQueueAndSet) - requestQueueInvTypes := []wire.InvType{wire.InvTypeTx, wire.InvTypeBlock, wire.InvTypeSyncBlock} + requestQueueInvTypes := []wire.InvType{wire.InvTypeTx, wire.InvTypeBlock, wire.InvTypeSyncBlock, wire.InvTypeMissingAncestor} for _, invType := range requestQueueInvTypes { requestQueues[invType] = &requestQueueAndSet{ set: make(map[daghash.Hash]struct{}), @@ -351,8 +351,6 @@ func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) { } func (sm *SyncManager) stopSyncFromPeer(peer *peerpkg.Peer) { - // Attempt to find a new peer to sync from if the quitting peer is the - // sync peer. if sm.syncPeer == peer { sm.syncPeer = nil sm.restartSyncIfNeeded() @@ -558,7 +556,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { blockHash, err) return } - sm.addBlocksToRequestQueue(state, missingAncestors, false) + sm.addBlocksToRequestQueue(state, missingAncestors, wire.InvTypeMissingAncestor) } else { // When the block is not an orphan, log information about it and // update the DAG state. 
@@ -584,15 +582,11 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { } } -func (sm *SyncManager) addBlocksToRequestQueue(state *peerSyncState, hashes []*daghash.Hash, isRelayedInv bool) { +func (sm *SyncManager) addBlocksToRequestQueue(state *peerSyncState, hashes []*daghash.Hash, invType wire.InvType) { state.requestQueueMtx.Lock() defer state.requestQueueMtx.Unlock() for _, hash := range hashes { if _, exists := sm.requestedBlocks[*hash]; !exists { - invType := wire.InvTypeSyncBlock - if isRelayedInv { - invType = wire.InvTypeBlock - } iv := wire.NewInvVect(invType, hash) state.addInvToRequestQueueNoLock(iv) } @@ -604,10 +598,13 @@ func (state *peerSyncState) addInvToRequestQueueNoLock(iv *wire.InvVect) { if !ok { panic(errors.Errorf("got unsupported inventory type %s", iv.Type)) } - if _, exists := requestQueue.set[*iv.Hash]; !exists { - requestQueue.set[*iv.Hash] = struct{}{} - requestQueue.queue = append(requestQueue.queue, iv) + + if _, exists := requestQueue.set[*iv.Hash]; exists { + return } + + requestQueue.set[*iv.Hash] = struct{}{} + requestQueue.queue = append(requestQueue.queue, iv) } func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect) { @@ -623,6 +620,8 @@ func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect) { // (either the main pool or orphan pool). func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) { switch invVect.Type { + case wire.InvTypeMissingAncestor: + fallthrough case wire.InvTypeSyncBlock: fallthrough case wire.InvTypeBlock: @@ -698,6 +697,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { case wire.InvTypeSyncBlock: case wire.InvTypeTx: default: + log.Warnf("got unsupported inv type %s from %s", iv.Type, peer) continue } @@ -737,7 +737,8 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { if iv.IsBlockOrSyncBlock() { if sm.dag.IsKnownInvalid(iv.Hash) { - peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, peerpkg.BanScoreInvalidInvBlock, 0, fmt.Sprintf("sent inv of invalid block %s", iv.Hash)) + peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, + peerpkg.BanScoreInvalidInvBlock, 0, fmt.Sprintf("sent inv of invalid block %s", iv.Hash)) return } // The block is an orphan block that we already have. @@ -751,13 +752,22 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) { // to signal there are more missing blocks that need to // be requested. if sm.dag.IsKnownOrphan(iv.Hash) { + if iv.Type == wire.InvTypeSyncBlock { + peer.AddBanScoreAndPushRejectMsg(imsg.inv.Command(), wire.RejectInvalid, iv.Hash, + peerpkg.BanScoreOrphanInvAsPartOfNetsync, 0, + fmt.Sprintf("sent inv of orphan block %s as part of netsync", iv.Hash)) + // Whether the peer will be banned or not, syncing from a node that doesn't follow + // the netsync protocol is undesired. 
+ sm.stopSyncFromPeer(peer) + return + } missingAncestors, err := sm.dag.GetOrphanMissingAncestorHashes(iv.Hash) if err != nil { log.Errorf("Failed to find missing ancestors for block %s: %s", iv.Hash, err) return } - sm.addBlocksToRequestQueue(state, missingAncestors, iv.Type != wire.InvTypeSyncBlock) + sm.addBlocksToRequestQueue(state, missingAncestors, wire.InvTypeMissingAncestor) continue } @@ -831,6 +841,8 @@ func (sm *SyncManager) addInvsToGetDataMessageFromQueue(gdmsg *wire.MsgGetData, for _, iv := range invsToAdd { delete(requestQueue.set, *iv.Hash) switch invType { + case wire.InvTypeMissingAncestor: + addBlockInv(iv) case wire.InvTypeSyncBlock: addBlockInv(iv) case wire.InvTypeBlock: @@ -859,13 +871,21 @@ func (sm *SyncManager) addInvsToGetDataMessageFromQueue(gdmsg *wire.MsgGetData, func (sm *SyncManager) sendInvsFromRequestQueue(peer *peerpkg.Peer, state *peerSyncState) error { state.requestQueueMtx.Lock() defer state.requestQueueMtx.Unlock() + if len(sm.requestedBlocks) != 0 { + return nil + } gdmsg := wire.NewMsgGetData() err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeSyncBlock, wire.MaxSyncBlockInvPerGetDataMsg) if err != nil { return err } if !sm.isSyncing || sm.isSynced() { - err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) + err := sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeMissingAncestor, wire.MaxInvPerGetDataMsg) + if err != nil { + return err + } + + err = sm.addInvsToGetDataMessageFromQueue(gdmsg, state, wire.InvTypeBlock, wire.MaxInvPerGetDataMsg) if err != nil { return err } diff --git a/peer/banscores.go b/peer/banscores.go index f4529c19a..9e831afb2 100644 --- a/peer/banscores.go +++ b/peer/banscores.go @@ -5,6 +5,7 @@ const ( BanScoreUnrequestedBlock = 100 BanScoreInvalidBlock = 100 BanScoreInvalidInvBlock = 100 + BanScoreOrphanInvAsPartOfNetsync = 100 BanScoreMalformedBlueScoreInOrphan = 100 BanScoreUnrequestedSelectedTip = 20 diff --git a/peer/message_logging.go b/peer/message_logging.go index 3be61a78e..d6c368683 100644 --- a/peer/message_logging.go +++ b/peer/message_logging.go @@ -46,6 +46,8 @@ func invSummary(invList []*wire.InvVect) string { return fmt.Sprintf("block %s", iv.Hash) case wire.InvTypeSyncBlock: return fmt.Sprintf("sync block %s", iv.Hash) + case wire.InvTypeMissingAncestor: + return fmt.Sprintf("missing ancestor %s", iv.Hash) case wire.InvTypeTx: return fmt.Sprintf("tx %s", iv.Hash) } diff --git a/peer/peer.go b/peer/peer.go index 90c8a34ec..43d31c9f8 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -1558,7 +1558,7 @@ out: // No handshake? They'll find out soon enough. if p.VersionKnown() { // If this is a new block, then we'll blast it - // out immediately, sipping the inv trickle + // out immediately, skipping the inv trickle // queue. 
if iv.Type == wire.InvTypeBlock { invMsg := wire.NewMsgInvSizeHint(1) diff --git a/server/p2p/on_get_data.go b/server/p2p/on_get_data.go index fbe3820c7..24e37c7b2 100644 --- a/server/p2p/on_get_data.go +++ b/server/p2p/on_get_data.go @@ -44,6 +44,8 @@ func (sp *Peer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) { err = sp.server.pushTxMsg(sp, (*daghash.TxID)(iv.Hash), c, waitChan) case wire.InvTypeSyncBlock: fallthrough + case wire.InvTypeMissingAncestor: + fallthrough case wire.InvTypeBlock: err = sp.server.pushBlockMsg(sp, iv.Hash, c, waitChan) case wire.InvTypeFilteredBlock: diff --git a/wire/invvect.go b/wire/invvect.go index 75c914354..3b8d0063a 100644 --- a/wire/invvect.go +++ b/wire/invvect.go @@ -33,20 +33,22 @@ type InvType uint32 // These constants define the various supported inventory vector types. const ( - InvTypeError InvType = 0 - InvTypeTx InvType = 1 - InvTypeBlock InvType = 2 - InvTypeFilteredBlock InvType = 3 - InvTypeSyncBlock InvType = 4 + InvTypeError InvType = 0 + InvTypeTx InvType = 1 + InvTypeBlock InvType = 2 + InvTypeFilteredBlock InvType = 3 + InvTypeSyncBlock InvType = 4 + InvTypeMissingAncestor InvType = 5 ) // Map of service flags back to their constant names for pretty printing. var ivStrings = map[InvType]string{ - InvTypeError: "ERROR", - InvTypeTx: "MSG_TX", - InvTypeBlock: "MSG_BLOCK", - InvTypeFilteredBlock: "MSG_FILTERED_BLOCK", - InvTypeSyncBlock: "MSG_SYNC_BLOCK", + InvTypeError: "ERROR", + InvTypeTx: "MSG_TX", + InvTypeBlock: "MSG_BLOCK", + InvTypeFilteredBlock: "MSG_FILTERED_BLOCK", + InvTypeSyncBlock: "MSG_SYNC_BLOCK", + InvTypeMissingAncestor: "MSG_MISSING_ANCESTOR", } // String returns the InvType in human-readable form. From 1358911d95ad322b79df12a790dbda00319593b3 Mon Sep 17 00:00:00 2001 From: Svarog Date: Wed, 17 Jun 2020 14:18:00 +0300 Subject: [PATCH 66/77] [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) --- peer/peer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/peer/peer_test.go b/peer/peer_test.go index a28e178ce..82bfd3fe3 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -589,7 +589,7 @@ func TestOutboundPeer(t *testing.T) { if _, err := p2.PushAddrMsg(addrs, nil); err != nil { t.Fatalf("PushAddrMsg: unexpected err %v\n", err) } - if err := p2.PushGetBlockInvsMsg(nil, &daghash.Hash{}); err != nil { + if err := p2.PushGetBlockInvsMsg(&daghash.Hash{}, &daghash.Hash{}); err != nil { t.Fatalf("PushGetBlockInvsMsg: unexpected err %v\n", err) } From 7bf8bb54369aec39bf2a5d998497625f668a7f05 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 18 Jun 2020 12:12:49 +0300 Subject: [PATCH 67/77] [NOD-1017] Move peers.json to db (#733) * [NOD-1017] Move peers.json to db * [NOD-1017] Fix tests * [NOD-1017] Change comments and rename variables * [NOD-1017] Separate to smaller functions * [NOD-1017] Renames * [NOD-1017] Name newAddrManagerForTest return params * [NOD-1017] Fix handling of non existing peersState * [NOD-1017] Add getPeersState rpc command * [NOD-1017] Fix comment * [NOD-1017] Split long line * [NOD-1017] Rename getPeersState->getPeerAddresses * [NOD-1017] Rename getPeerInfo->getConnectedPeerInfo --- addrmgr/addrmanager.go | 215 ++++++++++-------- addrmgr/addrmanager_test.go | 140 +++++++----- connmgr/connmanager_test.go | 19 +- dbaccess/peers.go | 26 +++ rpcclient/net.go | 28 +-- rpcmodel/rpc_commands.go | 21 +- rpcmodel/rpc_commands_test.go | 10 +- rpcmodel/rpc_results.go | 37 ++- server/p2p/on_addr.go | 2 +- server/p2p/on_get_addr.go | 2 +- server/p2p/on_version.go | 2 +- 
server/p2p/p2p.go | 29 +-- ...o.go => handle_get_connected_peer_info.go} | 8 +- server/rpc/handle_get_peer_addresses.go | 57 +++++ server/rpc/rpcserver.go | 8 +- server/rpc/rpcserverhelp.go | 75 ++++-- 16 files changed, 452 insertions(+), 227 deletions(-) create mode 100644 dbaccess/peers.go rename server/rpc/{handle_get_peer_info.go => handle_get_connected_peer_info.go} (79%) create mode 100644 server/rpc/handle_get_peer_addresses.go diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go index 1f15cf79e..c5ed123b9 100644 --- a/addrmgr/addrmanager.go +++ b/addrmgr/addrmanager.go @@ -5,16 +5,16 @@ package addrmgr import ( + "bytes" "container/list" crand "crypto/rand" // for seeding "encoding/binary" - "encoding/json" + "encoding/gob" + "github.com/kaspanet/kaspad/dbaccess" "github.com/pkg/errors" "io" "math/rand" "net" - "os" - "path/filepath" "strconv" "sync" "sync/atomic" @@ -26,14 +26,13 @@ import ( "github.com/kaspanet/kaspad/wire" ) -type newBucket [newBucketCount]map[string]*KnownAddress -type triedBucket [triedBucketCount]*list.List +type newBucket [NewBucketCount]map[string]*KnownAddress +type triedBucket [TriedBucketCount]*list.List // AddrManager provides a concurrency safe address manager for caching potential // peers on the Kaspa network. type AddrManager struct { mtx sync.Mutex - peersFile string lookupFunc func(string) ([]net.IP, error) rand *rand.Rand key [32]byte @@ -66,10 +65,12 @@ type serializedKnownAddress struct { // no refcount or tried, that is available from context. } -type serializedNewBucket [newBucketCount][]string -type serializedTriedBucket [triedBucketCount][]string +type serializedNewBucket [NewBucketCount][]string +type serializedTriedBucket [TriedBucketCount][]string -type serializedAddrManager struct { +// PeersStateForSerialization is the data model that is used to +// serialize the peers state to any encoding. +type PeersStateForSerialization struct { Version int Key [32]byte Addresses []*serializedKnownAddress @@ -118,17 +119,17 @@ const ( // tried address bucket. triedBucketSize = 256 - // triedBucketCount is the number of buckets we split tried + // TriedBucketCount is the number of buckets we split tried // addresses over. - triedBucketCount = 64 + TriedBucketCount = 64 // newBucketSize is the maximum number of addresses in each new address // bucket. newBucketSize = 64 - // newBucketCount is the number of buckets that we spread new addresses + // NewBucketCount is the number of buckets that we spread new addresses // over. - newBucketCount = 1024 + NewBucketCount = 1024 // triedBucketsPerGroup is the number of tried buckets over which an // address group will be spread. @@ -171,8 +172,8 @@ const ( // will share with a call to AddressCache. getAddrPercent = 23 - // serialisationVersion is the current version of the on-disk format. - serialisationVersion = 1 + // serializationVersion is the current version of the on-disk format. + serializationVersion = 1 ) // updateAddress is a helper function to either update an address already known @@ -392,7 +393,7 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { data2 = append(data2, hashbuf[:]...) hash2 := daghash.DoubleHashB(data2) - return int(binary.LittleEndian.Uint64(hash2) % newBucketCount) + return int(binary.LittleEndian.Uint64(hash2) % NewBucketCount) } func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { @@ -411,7 +412,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { data2 = append(data2, hashbuf[:]...) 
hash2 := daghash.DoubleHashB(data2) - return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount) + return int(binary.LittleEndian.Uint64(hash2) % TriedBucketCount) } // addressHandler is the main handler for the address manager. It must be run @@ -423,30 +424,62 @@ out: for { select { case <-dumpAddressTicker.C: - a.savePeers() + err := a.savePeers() + if err != nil { + panic(errors.Wrap(err, "error saving peers")) + } case <-a.quit: break out } } - a.savePeers() + err := a.savePeers() + if err != nil { + panic(errors.Wrap(err, "error saving peers")) + } a.wg.Done() log.Trace("Address handler done") } -// savePeers saves all the known addresses to a file so they can be read back +// savePeers saves all the known addresses to the database so they can be read back // in at next run. -func (a *AddrManager) savePeers() { +func (a *AddrManager) savePeers() error { + serializedPeersState, err := a.serializePeersState() + if err != nil { + return err + } + + return dbaccess.StorePeersState(dbaccess.NoTx(), serializedPeersState) +} + +func (a *AddrManager) serializePeersState() ([]byte, error) { + peersState, err := a.PeersStateForSerialization() + if err != nil { + return nil, err + } + + w := &bytes.Buffer{} + encoder := gob.NewEncoder(w) + err = encoder.Encode(&peersState) + if err != nil { + return nil, errors.Wrap(err, "failed to encode peers state") + } + + return w.Bytes(), nil +} + +// PeersStateForSerialization returns the data model that is used to serialize the peers state to any encoding. +func (a *AddrManager) PeersStateForSerialization() (*PeersStateForSerialization, error) { a.mtx.Lock() defer a.mtx.Unlock() - // First we make a serialisable datastructure so we can encode it to - // json. - sam := new(serializedAddrManager) - sam.Version = serialisationVersion - copy(sam.Key[:], a.key[:]) + // First we make a serializable data structure so we can encode it to + // gob. + peersState := new(PeersStateForSerialization) + peersState.Version = serializationVersion + copy(peersState.Key[:], a.key[:]) - sam.Addresses = make([]*serializedKnownAddress, len(a.addrIndex)) + peersState.Addresses = make([]*serializedKnownAddress, len(a.addrIndex)) i := 0 for k, v := range a.addrIndex { ska := new(serializedKnownAddress) @@ -463,119 +496,104 @@ func (a *AddrManager) savePeers() { ska.LastSuccess = v.lastsuccess.Unix() // Tried and refs are implicit in the rest of the structure // and will be worked out from context on unserialisation. 
- sam.Addresses[i] = ska + peersState.Addresses[i] = ska i++ } - sam.NewBuckets = make(map[string]*serializedNewBucket) + peersState.NewBuckets = make(map[string]*serializedNewBucket) for subnetworkID := range a.addrNew { subnetworkIDStr := subnetworkID.String() - sam.NewBuckets[subnetworkIDStr] = &serializedNewBucket{} + peersState.NewBuckets[subnetworkIDStr] = &serializedNewBucket{} for i := range a.addrNew[subnetworkID] { - sam.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i])) + peersState.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i])) j := 0 for k := range a.addrNew[subnetworkID][i] { - sam.NewBuckets[subnetworkIDStr][i][j] = k + peersState.NewBuckets[subnetworkIDStr][i][j] = k j++ } } } for i := range a.addrNewFullNodes { - sam.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i])) + peersState.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i])) j := 0 for k := range a.addrNewFullNodes[i] { - sam.NewBucketFullNodes[i][j] = k + peersState.NewBucketFullNodes[i][j] = k j++ } } - sam.TriedBuckets = make(map[string]*serializedTriedBucket) + peersState.TriedBuckets = make(map[string]*serializedTriedBucket) for subnetworkID := range a.addrTried { subnetworkIDStr := subnetworkID.String() - sam.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{} + peersState.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{} for i := range a.addrTried[subnetworkID] { - sam.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len()) + peersState.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len()) j := 0 for e := a.addrTried[subnetworkID][i].Front(); e != nil; e = e.Next() { ka := e.Value.(*KnownAddress) - sam.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na) + peersState.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na) j++ } } } for i := range a.addrTriedFullNodes { - sam.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len()) + peersState.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len()) j := 0 for e := a.addrTriedFullNodes[i].Front(); e != nil; e = e.Next() { ka := e.Value.(*KnownAddress) - sam.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na) + peersState.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na) j++ } } - w, err := os.Create(a.peersFile) - if err != nil { - log.Errorf("Error opening file %s: %s", a.peersFile, err) - return - } - enc := json.NewEncoder(w) - defer w.Close() - if err := enc.Encode(&sam); err != nil { - log.Errorf("Failed to encode file %s: %s", a.peersFile, err) - return - } + return peersState, nil } -// loadPeers loads the known address from the saved file. If empty, missing, or -// malformed file, just don't load anything and start fresh -func (a *AddrManager) loadPeers() { +// loadPeers loads the known address from the database. If missing, +// just don't load anything and start fresh. +func (a *AddrManager) loadPeers() error { a.mtx.Lock() defer a.mtx.Unlock() - err := a.deserializePeers(a.peersFile) - if err != nil { - log.Errorf("Failed to parse file %s: %s", a.peersFile, err) - // if it is invalid we nuke the old one unconditionally. 
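// Illustrative sketch, not part of the diff: the new persistence path
// round-trips the peers state through encoding/gob instead of a JSON file
// on disk. Stripped of the dbaccess layer, the encode/decode pair reduces
// to:
//
//	var w bytes.Buffer
//	if err := gob.NewEncoder(&w).Encode(peersState); err != nil {
//		return errors.Wrap(err, "failed to encode peers state")
//	}
//	serialized := w.Bytes()
//
//	var restored PeersStateForSerialization
//	if err := gob.NewDecoder(bytes.NewBuffer(serialized)).Decode(&restored); err != nil {
//		return errors.Wrap(err, "error deserializing peers state")
//	}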
- err = os.Remove(a.peersFile) - if err != nil { - log.Warnf("Failed to remove corrupt peers file %s: %s", - a.peersFile, err) - } + serializedPeerState, err := dbaccess.FetchPeersState(dbaccess.NoTx()) + if dbaccess.IsNotFoundError(err) { a.reset() - return - } - log.Infof("Loaded %d addresses from file '%s'", a.totalNumAddresses(), a.peersFile) -} - -func (a *AddrManager) deserializePeers(filePath string) error { - _, err := os.Stat(filePath) - if os.IsNotExist(err) { + log.Info("No peers state was found in the database. Created a new one", a.totalNumAddresses()) return nil } - r, err := os.Open(filePath) if err != nil { - return errors.Errorf("%s error opening file: %s", filePath, err) - } - defer r.Close() - - var sam serializedAddrManager - dec := json.NewDecoder(r) - err = dec.Decode(&sam) - if err != nil { - return errors.Errorf("error reading %s: %s", filePath, err) + return err } - if sam.Version != serialisationVersion { + err = a.deserializePeersState(serializedPeerState) + if err != nil { + return err + } + + log.Infof("Loaded %d addresses from database", a.totalNumAddresses()) + return nil +} + +func (a *AddrManager) deserializePeersState(serializedPeerState []byte) error { + var peersState PeersStateForSerialization + r := bytes.NewBuffer(serializedPeerState) + dec := gob.NewDecoder(r) + err := dec.Decode(&peersState) + if err != nil { + return errors.Wrap(err, "error deserializing peers state") + } + + if peersState.Version != serializationVersion { return errors.Errorf("unknown version %d in serialized "+ - "addrmanager", sam.Version) + "peers state", peersState.Version) } - copy(a.key[:], sam.Key[:]) + copy(a.key[:], peersState.Key[:]) - for _, v := range sam.Addresses { + for _, v := range peersState.Addresses { ka := new(KnownAddress) ka.na, err = a.DeserializeNetAddress(v.Addr) if err != nil { @@ -600,12 +618,12 @@ func (a *AddrManager) deserializePeers(filePath string) error { a.addrIndex[NetAddressKey(ka.na)] = ka } - for subnetworkIDStr := range sam.NewBuckets { + for subnetworkIDStr := range peersState.NewBuckets { subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) if err != nil { return err } - for i, subnetworkNewBucket := range sam.NewBuckets[subnetworkIDStr] { + for i, subnetworkNewBucket := range peersState.NewBuckets[subnetworkIDStr] { for _, val := range subnetworkNewBucket { ka, ok := a.addrIndex[val] if !ok { @@ -622,7 +640,7 @@ func (a *AddrManager) deserializePeers(filePath string) error { } } - for i, newBucket := range sam.NewBucketFullNodes { + for i, newBucket := range peersState.NewBucketFullNodes { for _, val := range newBucket { ka, ok := a.addrIndex[val] if !ok { @@ -638,12 +656,12 @@ func (a *AddrManager) deserializePeers(filePath string) error { } } - for subnetworkIDStr := range sam.TriedBuckets { + for subnetworkIDStr := range peersState.TriedBuckets { subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) if err != nil { return err } - for i, subnetworkTriedBucket := range sam.TriedBuckets[subnetworkIDStr] { + for i, subnetworkTriedBucket := range peersState.TriedBuckets[subnetworkIDStr] { for _, val := range subnetworkTriedBucket { ka, ok := a.addrIndex[val] if !ok { @@ -658,7 +676,7 @@ func (a *AddrManager) deserializePeers(filePath string) error { } } - for i, triedBucket := range sam.TriedBucketFullNodes { + for i, triedBucket := range peersState.TriedBucketFullNodes { for _, val := range triedBucket { ka, ok := a.addrIndex[val] if !ok { @@ -704,20 +722,24 @@ func (a *AddrManager) DeserializeNetAddress(addr string) 
(*wire.NetAddress, erro // Start begins the core address handler which manages a pool of known // addresses, timeouts, and interval based writes. -func (a *AddrManager) Start() { +func (a *AddrManager) Start() error { // Already started? if atomic.AddInt32(&a.started, 1) != 1 { - return + return nil } log.Trace("Starting address manager") - // Load peers we already know about from file. - a.loadPeers() + // Load peers we already know about from the database. + err := a.loadPeers() + if err != nil { + return err + } // Start the address ticker to save addresses periodically. a.wg.Add(1) spawn(a.addressHandler) + return nil } // Stop gracefully shuts down the address manager by stopping the main handler. @@ -1333,9 +1355,8 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net // New returns a new Kaspa address manager. // Use Start to begin processing asynchronous address updates. -func New(dataDir string, lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager { +func New(lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager { am := AddrManager{ - peersFile: filepath.Join(dataDir, "peers.json"), lookupFunc: lookupFunc, rand: rand.New(rand.NewSource(time.Now().UnixNano())), quit: make(chan struct{}), diff --git a/addrmgr/addrmanager_test.go b/addrmgr/addrmanager_test.go index 1ad880a4d..dc9e4a491 100644 --- a/addrmgr/addrmanager_test.go +++ b/addrmgr/addrmanager_test.go @@ -8,7 +8,9 @@ import ( "fmt" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/dagconfig" + "github.com/kaspanet/kaspad/dbaccess" "github.com/kaspanet/kaspad/util/subnetworkid" + "io/ioutil" "net" "reflect" "testing" @@ -101,14 +103,41 @@ func addNaTest(ip string, port uint16, want string) { naTests = append(naTests, test) } -func lookupFunc(host string) ([]net.IP, error) { +func lookupFuncForTest(host string) ([]net.IP, error) { return nil, errors.New("not implemented") } +func newAddrManagerForTest(t *testing.T, testName string, + localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddrManager, teardown func()) { + + dbPath, err := ioutil.TempDir("", testName) + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + + err = dbaccess.Open(dbPath) + if err != nil { + t.Fatalf("error creating db: %s", err) + } + + addressManager = New(lookupFuncForTest, localSubnetworkID) + + return addressManager, func() { + err := dbaccess.Close() + if err != nil { + t.Fatalf("error closing the database: %s", err) + } + } +} + func TestStartStop(t *testing.T) { - n := New("teststartstop", lookupFunc, nil) - n.Start() - err := n.Stop() + amgr, teardown := newAddrManagerForTest(t, "TestStartStop", nil) + defer teardown() + err := amgr.Start() + if err != nil { + t.Fatalf("Address Manager failed to start: %v", err) + } + err = amgr.Stop() if err != nil { t.Fatalf("Address Manager failed to stop: %v", err) } @@ -148,7 +177,8 @@ func TestAddAddressByIP(t *testing.T) { }, } - amgr := New("testaddressbyip", nil, nil) + amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil) + defer teardown() for i, test := range tests { err := amgr.AddAddressByIP(test.addrIP, nil) if test.err != nil && err == nil { @@ -213,7 +243,8 @@ func TestAddLocalAddress(t *testing.T) { true, }, } - amgr := New("testaddlocaladdress", nil, nil) + amgr, teardown := newAddrManagerForTest(t, "TestAddLocalAddress", nil) + defer teardown() for x, test := range tests { result := 
amgr.AddLocalAddress(&test.address, test.priority) if result == nil && !test.valid { @@ -239,21 +270,22 @@ func TestAttempt(t *testing.T) { }) defer config.SetActiveConfig(originalActiveCfg) - n := New("testattempt", lookupFunc, nil) + amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil) + defer teardown() // Add a new address and get it - err := n.AddAddressByIP(someIP+":8333", nil) + err := amgr.AddAddressByIP(someIP+":8333", nil) if err != nil { t.Fatalf("Adding address failed: %v", err) } - ka := n.GetAddress() + ka := amgr.GetAddress() if !ka.LastAttempt().IsZero() { t.Errorf("Address should not have attempts, but does") } na := ka.NetAddress() - n.Attempt(na) + amgr.Attempt(na) if ka.LastAttempt().IsZero() { t.Errorf("Address should have an attempt, but does not") @@ -270,19 +302,20 @@ func TestConnected(t *testing.T) { }) defer config.SetActiveConfig(originalActiveCfg) - n := New("testconnected", lookupFunc, nil) + amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil) + defer teardown() // Add a new address and get it - err := n.AddAddressByIP(someIP+":8333", nil) + err := amgr.AddAddressByIP(someIP+":8333", nil) if err != nil { t.Fatalf("Adding address failed: %v", err) } - ka := n.GetAddress() + ka := amgr.GetAddress() na := ka.NetAddress() // make it an hour ago na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0) - n.Connected(na) + amgr.Connected(na) if !ka.NetAddress().Timestamp.After(na.Timestamp) { t.Errorf("Address should have a new timestamp, but does not") @@ -299,9 +332,10 @@ func TestNeedMoreAddresses(t *testing.T) { }) defer config.SetActiveConfig(originalActiveCfg) - n := New("testneedmoreaddresses", lookupFunc, nil) + amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil) + defer teardown() addrsToAdd := 1500 - b := n.NeedMoreAddresses() + b := amgr.NeedMoreAddresses() if !b { t.Errorf("Expected that we need more addresses") } @@ -310,7 +344,7 @@ func TestNeedMoreAddresses(t *testing.T) { var err error for i := 0; i < addrsToAdd; i++ { s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60) - addrs[i], err = n.DeserializeNetAddress(s) + addrs[i], err = amgr.DeserializeNetAddress(s) if err != nil { t.Errorf("Failed to turn %s into an address: %v", s, err) } @@ -318,13 +352,13 @@ func TestNeedMoreAddresses(t *testing.T) { srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) - n.AddAddresses(addrs, srcAddr, nil) - numAddrs := n.TotalNumAddresses() + amgr.AddAddresses(addrs, srcAddr, nil) + numAddrs := amgr.TotalNumAddresses() if numAddrs > addrsToAdd { t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd) } - b = n.NeedMoreAddresses() + b = amgr.NeedMoreAddresses() if b { t.Errorf("Expected that we don't need more addresses") } @@ -340,7 +374,8 @@ func TestGood(t *testing.T) { }) defer config.SetActiveConfig(originalActiveCfg) - n := New("testgood", lookupFunc, nil) + amgr, teardown := newAddrManagerForTest(t, "TestGood", nil) + defer teardown() addrsToAdd := 64 * 64 addrs := make([]*wire.NetAddress, addrsToAdd) subnetworkCount := 32 @@ -349,7 +384,7 @@ func TestGood(t *testing.T) { var err error for i := 0; i < addrsToAdd; i++ { s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60) - addrs[i], err = n.DeserializeNetAddress(s) + addrs[i], err = amgr.DeserializeNetAddress(s) if err != nil { t.Errorf("Failed to turn %s into an address: %v", s, err) } @@ -361,24 +396,24 @@ func TestGood(t *testing.T) { srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 
0) - n.AddAddresses(addrs, srcAddr, nil) + amgr.AddAddresses(addrs, srcAddr, nil) for i, addr := range addrs { - n.Good(addr, subnetworkIDs[i%subnetworkCount]) + amgr.Good(addr, subnetworkIDs[i%subnetworkCount]) } - numAddrs := n.TotalNumAddresses() + numAddrs := amgr.TotalNumAddresses() if numAddrs >= addrsToAdd { t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd) } - numCache := len(n.AddressCache(true, nil)) + numCache := len(amgr.AddressCache(true, nil)) if numCache == 0 || numCache >= numAddrs/4 { t.Errorf("Number of addresses in cache: got %d, want positive and less than %d", numCache, numAddrs/4) } for i := 0; i < subnetworkCount; i++ { - numCache = len(n.AddressCache(false, subnetworkIDs[i])) + numCache = len(amgr.AddressCache(false, subnetworkIDs[i])) if numCache == 0 || numCache >= numAddrs/subnetworkCount { t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d", numCache, numAddrs/4/subnetworkCount) @@ -396,17 +431,18 @@ func TestGoodChangeSubnetworkID(t *testing.T) { }) defer config.SetActiveConfig(originalActiveCfg) - n := New("test_good_change_subnetwork_id", lookupFunc, nil) + amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil) + defer teardown() addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) addrKey := NetAddressKey(addr) srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) oldSubnetwork := subnetworkid.SubnetworkIDNative - n.AddAddress(addr, srcAddr, oldSubnetwork) - n.Good(addr, oldSubnetwork) + amgr.AddAddress(addr, srcAddr, oldSubnetwork) + amgr.Good(addr, oldSubnetwork) // make sure address was saved to addrIndex under oldSubnetwork - ka := n.find(addr) + ka := amgr.find(addr) if ka == nil { t.Fatalf("Address was not found after first time .Good called") } @@ -415,7 +451,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was added to correct bucket under oldSubnetwork - bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)] + bucket := amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] wasFound := false for e := bucket.Front(); e != nil; e = e.Next() { if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { @@ -428,10 +464,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) { // now call .Good again with a different subnetwork newSubnetwork := subnetworkid.SubnetworkIDRegistry - n.Good(addr, newSubnetwork) + amgr.Good(addr, newSubnetwork) // make sure address was updated in addrIndex under newSubnetwork - ka = n.find(addr) + ka = amgr.find(addr) if ka == nil { t.Fatalf("Address was not found after second time .Good called") } @@ -440,7 +476,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was removed from bucket under oldSubnetwork - bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)] + bucket = amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] wasFound = false for e := bucket.Front(); e != nil; e = e.Next() { if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { @@ -452,7 +488,7 @@ func TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was added to correct bucket under newSubnetwork - bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)] + bucket = amgr.addrTried[*newSubnetwork][amgr.getTriedBucket(addr)] wasFound = false for e := bucket.Front(); e != nil; e = e.Next() { if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { @@ -475,34 +511,35 @@ func TestGetAddress(t *testing.T) { defer 
config.SetActiveConfig(originalActiveCfg) localSubnetworkID := &subnetworkid.SubnetworkID{0xff} - n := New("testgetaddress", lookupFunc, localSubnetworkID) + amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID) + defer teardown() // Get an address from an empty set (should error) - if rv := n.GetAddress(); rv != nil { + if rv := amgr.GetAddress(); rv != nil { t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil) } // Add a new address and get it - err := n.AddAddressByIP(someIP+":8332", localSubnetworkID) + err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID) if err != nil { t.Fatalf("Adding address failed: %v", err) } - ka := n.GetAddress() + ka := amgr.GetAddress() if ka == nil { t.Fatalf("Did not get an address where there is one in the pool") } - n.Attempt(ka.NetAddress()) + amgr.Attempt(ka.NetAddress()) // Checks that we don't get it if we find that it has other subnetwork ID than expected. actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe} - n.Good(ka.NetAddress(), actualSubnetworkID) - ka = n.GetAddress() + amgr.Good(ka.NetAddress(), actualSubnetworkID) + ka = amgr.GetAddress() if ka != nil { t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID) } // Checks that the total number of addresses incremented although the new address is not full node or a partial node of the same subnetwork as the local node. - numAddrs := n.TotalNumAddresses() + numAddrs := amgr.TotalNumAddresses() if numAddrs != 1 { t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1) } @@ -510,11 +547,11 @@ func TestGetAddress(t *testing.T) { // Now we repeat the same process, but now the address has the expected subnetwork ID. // Add a new address and get it - err = n.AddAddressByIP(someIP+":8333", localSubnetworkID) + err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID) if err != nil { t.Fatalf("Adding address failed: %v", err) } - ka = n.GetAddress() + ka = amgr.GetAddress() if ka == nil { t.Fatalf("Did not get an address where there is one in the pool") } @@ -524,11 +561,11 @@ func TestGetAddress(t *testing.T) { if !ka.SubnetworkID().IsEqual(localSubnetworkID) { t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID) } - n.Attempt(ka.NetAddress()) + amgr.Attempt(ka.NetAddress()) // Mark this as a good address and get it - n.Good(ka.NetAddress(), localSubnetworkID) - ka = n.GetAddress() + amgr.Good(ka.NetAddress(), localSubnetworkID) + ka = amgr.GetAddress() if ka == nil { t.Fatalf("Did not get an address where there is one in the pool") } @@ -539,7 +576,7 @@ func TestGetAddress(t *testing.T) { t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID) } - numAddrs = n.TotalNumAddresses() + numAddrs = amgr.TotalNumAddresses() if numAddrs != 2 { t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1) } @@ -604,7 +641,8 @@ func TestGetBestLocalAddress(t *testing.T) { */ } - amgr := New("testgetbestlocaladdress", nil, nil) + amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil) + defer teardown() // Test against default when there's no address for x, test := range tests { diff --git a/connmgr/connmanager_test.go b/connmgr/connmanager_test.go index 7a981437a..f7d28ccff 100644 --- a/connmgr/connmanager_test.go +++ b/connmgr/connmanager_test.go @@ -9,11 +9,11 @@ import ( "github.com/kaspanet/kaspad/addrmgr" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/dagconfig" + 
"github.com/kaspanet/kaspad/dbaccess" "github.com/pkg/errors" "io" "io/ioutil" "net" - "os" "sync/atomic" "testing" "time" @@ -192,19 +192,26 @@ func addressManagerForTest(t *testing.T, testName string, numAddresses uint8) (* } func createEmptyAddressManagerForTest(t *testing.T, testName string) (*addrmgr.AddrManager, func()) { - path, err := ioutil.TempDir("", fmt.Sprintf("%s-addressmanager", testName)) + path, err := ioutil.TempDir("", fmt.Sprintf("%s-database", testName)) if err != nil { t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+ "failed: %s", err) } - return addrmgr.New(path, nil, nil), func() { - // Wait for the connection manager to finish + err = dbaccess.Open(path) + if err != nil { + t.Fatalf("error creating db: %s", err) + } + + return addrmgr.New(nil, nil), func() { + // Wait for the connection manager to finish, so it'll + // have access to the address manager as long as it's + // alive. time.Sleep(10 * time.Millisecond) - err := os.RemoveAll(path) + err := dbaccess.Close() if err != nil { - t.Fatalf("couldn't remove path %s", path) + t.Fatalf("error closing the database: %s", err) } } } diff --git a/dbaccess/peers.go b/dbaccess/peers.go new file mode 100644 index 000000000..f7104c1bf --- /dev/null +++ b/dbaccess/peers.go @@ -0,0 +1,26 @@ +package dbaccess + +import "github.com/kaspanet/kaspad/database" + +var ( + peersKey = database.MakeBucket().Key([]byte("peers")) +) + +// StorePeersState stores the peers state in the database. +func StorePeersState(context Context, peersState []byte) error { + accessor, err := context.accessor() + if err != nil { + return err + } + return accessor.Put(peersKey, peersState) +} + +// FetchPeersState retrieves the peers state from the database. +// Returns ErrNotFound if the state is missing from the database. +func FetchPeersState(context Context) ([]byte, error) { + accessor, err := context.accessor() + if err != nil { + return nil, err + } + return accessor.Get(peersKey) +} diff --git a/rpcclient/net.go b/rpcclient/net.go index 3dfd0ca4c..dbd126358 100644 --- a/rpcclient/net.go +++ b/rpcclient/net.go @@ -186,26 +186,26 @@ func (c *Client) PingAsync() FuturePingResult { // Ping queues a ping to be sent to each connected peer. // -// Use the GetPeerInfo function and examine the PingTime and PingWait fields to +// Use the GetConnectedPeerInfo function and examine the PingTime and PingWait fields to // access the ping times. func (c *Client) Ping() error { return c.PingAsync().Receive() } -// FutureGetPeerInfoResult is a future promise to deliver the result of a -// GetPeerInfoAsync RPC invocation (or an applicable error). -type FutureGetPeerInfoResult chan *response +// FutureGetConnectedPeerInfo is a future promise to deliver the result of a +// GetConnectedPeerInfoAsync RPC invocation (or an applicable error). +type FutureGetConnectedPeerInfo chan *response // Receive waits for the response promised by the future and returns data about // each connected network peer. -func (r FutureGetPeerInfoResult) Receive() ([]rpcmodel.GetPeerInfoResult, error) { +func (r FutureGetConnectedPeerInfo) Receive() ([]rpcmodel.GetConnectedPeerInfoResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } - // Unmarshal result as an array of getpeerinfo result objects. - var peerInfo []rpcmodel.GetPeerInfoResult + // Unmarshal result as an array of getConnectedPeerInfo result objects. 
+ var peerInfo []rpcmodel.GetConnectedPeerInfoResult err = json.Unmarshal(res, &peerInfo) if err != nil { return nil, err @@ -214,19 +214,19 @@ func (r FutureGetPeerInfoResult) Receive() ([]rpcmodel.GetPeerInfoResult, error) return peerInfo, nil } -// GetPeerInfoAsync returns an instance of a type that can be used to get the +// GetConnectedPeerInfoAsync returns an instance of a type that can be used to get the // result of the RPC at some future time by invoking the Receive function on the // returned instance. // -// See GetPeerInfo for the blocking version and more details. -func (c *Client) GetPeerInfoAsync() FutureGetPeerInfoResult { - cmd := rpcmodel.NewGetPeerInfoCmd() +// See GetConnectedPeerInfo for the blocking version and more details. +func (c *Client) GetConnectedPeerInfoAsync() FutureGetConnectedPeerInfo { + cmd := rpcmodel.NewGetConnectedPeerInfoCmd() return c.sendCmd(cmd) } -// GetPeerInfo returns data about each connected network peer. -func (c *Client) GetPeerInfo() ([]rpcmodel.GetPeerInfoResult, error) { - return c.GetPeerInfoAsync().Receive() +// GetConnectedPeerInfo returns data about each connected network peer. +func (c *Client) GetConnectedPeerInfo() ([]rpcmodel.GetConnectedPeerInfoResult, error) { + return c.GetConnectedPeerInfoAsync().Receive() } // FutureGetNetTotalsResult is a future promise to deliver the result of a diff --git a/rpcmodel/rpc_commands.go b/rpcmodel/rpc_commands.go index f9003fbcb..67f90146e 100644 --- a/rpcmodel/rpc_commands.go +++ b/rpcmodel/rpc_commands.go @@ -382,13 +382,13 @@ func NewGetNetTotalsCmd() *GetNetTotalsCmd { return &GetNetTotalsCmd{} } -// GetPeerInfoCmd defines the getPeerInfo JSON-RPC command. -type GetPeerInfoCmd struct{} +// GetConnectedPeerInfoCmd defines the getConnectedPeerInfo JSON-RPC command. +type GetConnectedPeerInfoCmd struct{} -// NewGetPeerInfoCmd returns a new instance which can be used to issue a getpeer +// NewGetConnectedPeerInfoCmd returns a new instance which can be used to issue a getpeer // JSON-RPC command. -func NewGetPeerInfoCmd() *GetPeerInfoCmd { - return &GetPeerInfoCmd{} +func NewGetConnectedPeerInfoCmd() *GetConnectedPeerInfoCmd { + return &GetConnectedPeerInfoCmd{} } // GetRawMempoolCmd defines the getmempool JSON-RPC command. @@ -655,6 +655,14 @@ type VersionCmd struct{} // version command. func NewVersionCmd() *VersionCmd { return new(VersionCmd) } +// GetPeerAddressesCmd defines the getPeerAddresses JSON-RPC command. +type GetPeerAddressesCmd struct { +} + +// NewGetPeerAddressesCmd returns a new instance which can be used to issue a JSON-RPC +// getPeerAddresses command. +func NewGetPeerAddressesCmd() *GetPeerAddressesCmd { return new(GetPeerAddressesCmd) } + func init() { // No special flags for commands in this file. 
flags := UsageFlag(0) @@ -681,7 +689,8 @@ func init() { MustRegisterCommand("getMempoolInfo", (*GetMempoolInfoCmd)(nil), flags) MustRegisterCommand("getNetworkInfo", (*GetNetworkInfoCmd)(nil), flags) MustRegisterCommand("getNetTotals", (*GetNetTotalsCmd)(nil), flags) - MustRegisterCommand("getPeerInfo", (*GetPeerInfoCmd)(nil), flags) + MustRegisterCommand("getConnectedPeerInfo", (*GetConnectedPeerInfoCmd)(nil), flags) + MustRegisterCommand("getPeerAddresses", (*GetPeerAddressesCmd)(nil), flags) MustRegisterCommand("getRawMempool", (*GetRawMempoolCmd)(nil), flags) MustRegisterCommand("getSubnetwork", (*GetSubnetworkCmd)(nil), flags) MustRegisterCommand("getTxOut", (*GetTxOutCmd)(nil), flags) diff --git a/rpcmodel/rpc_commands_test.go b/rpcmodel/rpc_commands_test.go index de6694923..bb0e3f0ef 100644 --- a/rpcmodel/rpc_commands_test.go +++ b/rpcmodel/rpc_commands_test.go @@ -444,15 +444,15 @@ func TestRPCServerCommands(t *testing.T) { unmarshalled: &rpcmodel.GetNetTotalsCmd{}, }, { - name: "getPeerInfo", + name: "getConnectedPeerInfo", newCmd: func() (interface{}, error) { - return rpcmodel.NewCommand("getPeerInfo") + return rpcmodel.NewCommand("getConnectedPeerInfo") }, staticCmd: func() interface{} { - return rpcmodel.NewGetPeerInfoCmd() + return rpcmodel.NewGetConnectedPeerInfoCmd() }, - marshalled: `{"jsonrpc":"1.0","method":"getPeerInfo","params":[],"id":1}`, - unmarshalled: &rpcmodel.GetPeerInfoCmd{}, + marshalled: `{"jsonrpc":"1.0","method":"getConnectedPeerInfo","params":[],"id":1}`, + unmarshalled: &rpcmodel.GetConnectedPeerInfoCmd{}, }, { name: "getRawMempool", diff --git a/rpcmodel/rpc_results.go b/rpcmodel/rpc_results.go index 5b3c98d79..6a5d38887 100644 --- a/rpcmodel/rpc_results.go +++ b/rpcmodel/rpc_results.go @@ -4,7 +4,10 @@ package rpcmodel -import "encoding/json" +import ( + "encoding/json" + "github.com/kaspanet/kaspad/addrmgr" +) // GetBlockHeaderVerboseResult models the data from the getblockheader command when // the verbose flag is set. When the verbose flag is not set, getblockheader @@ -213,8 +216,8 @@ type GetNetworkInfoResult struct { Warnings string `json:"warnings"` } -// GetPeerInfoResult models the data returned from the getpeerinfo command. -type GetPeerInfoResult struct { +// GetConnectedPeerInfoResult models the data returned from the getConnectedPeerInfo command. +type GetConnectedPeerInfoResult struct { ID int32 `json:"id"` Addr string `json:"addr"` Services string `json:"services"` @@ -236,6 +239,34 @@ type GetPeerInfoResult struct { SyncNode bool `json:"syncNode"` } +// GetPeerAddressesResult models the data returned from the getPeerAddresses command. +type GetPeerAddressesResult struct { + Version int + Key [32]byte + Addresses []*GetPeerAddressesKnownAddressResult + NewBuckets map[string]*GetPeerAddressesNewBucketResult // string is Subnetwork ID + NewBucketFullNodes GetPeerAddressesNewBucketResult + TriedBuckets map[string]*GetPeerAddressesTriedBucketResult // string is Subnetwork ID + TriedBucketFullNodes GetPeerAddressesTriedBucketResult +} + +// GetPeerAddressesKnownAddressResult models a GetPeerAddressesResult known address. +type GetPeerAddressesKnownAddressResult struct { + Addr string + Src string + SubnetworkID string + Attempts int + TimeStamp int64 + LastAttempt int64 + LastSuccess int64 +} + +// GetPeerAddressesNewBucketResult models a GetPeerAddressesResult new bucket. +type GetPeerAddressesNewBucketResult [addrmgr.NewBucketCount][]string + +// GetPeerAddressesTriedBucketResult models a GetPeerAddressesResult tried bucket. 
+type GetPeerAddressesTriedBucketResult [addrmgr.TriedBucketCount][]string + // GetRawMempoolVerboseResult models the data returned from the getrawmempool // command when the verbose flag is set. When the verbose flag is not set, // getrawmempool returns an array of transaction hashes. diff --git a/server/p2p/on_addr.go b/server/p2p/on_addr.go index a75e38de9..26bcb38e5 100644 --- a/server/p2p/on_addr.go +++ b/server/p2p/on_addr.go @@ -59,5 +59,5 @@ func (sp *Peer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) { // Add addresses to server address manager. The address manager handles // the details of things such as preventing duplicate addresses, max // addresses, and last seen updates. - sp.server.addrManager.AddAddresses(msg.AddrList, sp.NA(), msg.SubnetworkID) + sp.server.AddrManager.AddAddresses(msg.AddrList, sp.NA(), msg.SubnetworkID) } diff --git a/server/p2p/on_get_addr.go b/server/p2p/on_get_addr.go index 2a3d1b0bd..8dc3e9266 100644 --- a/server/p2p/on_get_addr.go +++ b/server/p2p/on_get_addr.go @@ -34,7 +34,7 @@ func (sp *Peer) OnGetAddr(_ *peer.Peer, msg *wire.MsgGetAddr) { sp.sentAddrs = true // Get the current known addresses from the address manager. - addrCache := sp.server.addrManager.AddressCache(msg.IncludeAllSubnetworks, msg.SubnetworkID) + addrCache := sp.server.AddrManager.AddressCache(msg.IncludeAllSubnetworks, msg.SubnetworkID) // Push the addresses. sp.pushAddrMsg(addrCache, sp.SubnetworkID()) diff --git a/server/p2p/on_version.go b/server/p2p/on_version.go index ff0a18ec1..5621361b3 100644 --- a/server/p2p/on_version.go +++ b/server/p2p/on_version.go @@ -21,7 +21,7 @@ func (sp *Peer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) { // to specified peers and actively avoids advertising and connecting to // discovered peers. if !config.ActiveConfig().Simnet { - addrManager := sp.server.addrManager + addrManager := sp.server.AddrManager // Outbound connections. if !sp.Inbound() { diff --git a/server/p2p/p2p.go b/server/p2p/p2p.go index 426f3b10c..a7720ba4d 100644 --- a/server/p2p/p2p.go +++ b/server/p2p/p2p.go @@ -214,7 +214,7 @@ type Server struct { shutdownSched int32 DAGParams *dagconfig.Params - addrManager *addrmgr.AddrManager + AddrManager *addrmgr.AddrManager connManager *connmgr.ConnManager SigCache *txscript.SigCache SyncManager *netsync.SyncManager @@ -669,7 +669,7 @@ func (s *Server) handleDonePeerMsg(state *peerState, sp *Peer) { // Update the address' last seen time if the peer has acknowledged // our version and has sent us its version as well. if sp.VerAckReceived() && sp.VersionKnown() && sp.NA() != nil { - s.addrManager.Connected(sp.NA()) + s.AddrManager.Connected(sp.NA()) } // If we get here it means that either we didn't know about the peer @@ -940,7 +940,7 @@ func newPeerConfig(sp *Peer) *peer.Config { SelectedTipHash: sp.selectedTipHash, IsInDAG: sp.blockExists, AddBanScore: sp.addBanScore, - HostToNetAddress: sp.server.addrManager.HostToNetAddress, + HostToNetAddress: sp.server.AddrManager.HostToNetAddress, Proxy: config.ActiveConfig().Proxy, UserAgentName: userAgentName, UserAgentVersion: userAgentVersion, @@ -981,7 +981,7 @@ func (s *Server) outboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) s.peerConnected(sp, conn) - s.addrManager.Attempt(sp.NA()) + s.AddrManager.Attempt(sp.NA()) } func (s *Server) peerConnected(sp *Peer, conn net.Conn) { @@ -1025,7 +1025,7 @@ func (s *Server) outboundPeerConnectionFailed(connReq *connmgr.ConnReq) { // take nil for it. 
netAddress := wire.NewNetAddressIPPort(net.ParseIP(host), uint16(port), defaultServices) - s.addrManager.Attempt(netAddress) + s.AddrManager.Attempt(netAddress) } // peerDoneHandler handles peer disconnects by notifiying the server that it's @@ -1058,7 +1058,10 @@ func (s *Server) peerHandler() { // to this handler and rather than adding more channels to sychronize // things, it's easier and slightly faster to simply start and stop them // in this handler. - s.addrManager.Start() + err := s.AddrManager.Start() + if err != nil { + panic(errors.Wrap(err, "address manager failed to start")) + } s.SyncManager.Start() s.quitWaitGroup.Add(1) @@ -1079,7 +1082,7 @@ func (s *Server) peerHandler() { // Kaspad uses a lookup of the dns seeder here. Since seeder returns // IPs of nodes and not its own IP, we can not know real IP of // source. So we'll take first returned address as source. - s.addrManager.AddAddresses(addrs, addrs[0], subnetworkID) + s.AddrManager.AddAddresses(addrs, addrs[0], subnetworkID) }) } @@ -1138,7 +1141,7 @@ out: s.connManager.Stop() s.SyncManager.Stop() - s.addrManager.Stop() + s.AddrManager.Stop() // Drain channels before exiting so nothing is left waiting around // to send. @@ -1431,7 +1434,7 @@ out: } na := wire.NewNetAddressIPPort(externalip, uint16(listenPort), s.services) - err = s.addrManager.AddLocalAddress(na, addrmgr.UpnpPrio) + err = s.AddrManager.AddLocalAddress(na, addrmgr.UpnpPrio) if err != nil { // XXX DeletePortMapping? } @@ -1465,13 +1468,13 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch services &^= wire.SFNodeBloom } - amgr := addrmgr.New(config.ActiveConfig().DataDir, serverutils.KaspadLookup, config.ActiveConfig().SubnetworkID) + addressManager := addrmgr.New(serverutils.KaspadLookup, config.ActiveConfig().SubnetworkID) var listeners []net.Listener var nat serverutils.NAT if !config.ActiveConfig().DisableListen { var err error - listeners, nat, err = initListeners(amgr, listenAddrs, services) + listeners, nat, err = initListeners(addressManager, listenAddrs, services) if err != nil { return nil, err } @@ -1484,7 +1487,7 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch s := Server{ DAGParams: dagParams, - addrManager: amgr, + AddrManager: addressManager, newPeers: make(chan *Peer, maxPeers), donePeers: make(chan *Peer, maxPeers), banPeers: make(chan *Peer, maxPeers), @@ -1567,7 +1570,7 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch Dial: serverutils.KaspadDial, OnConnection: s.outboundPeerConnected, OnConnectionFailed: s.outboundPeerConnectionFailed, - AddrManager: s.addrManager, + AddrManager: s.AddrManager, }) if err != nil { return nil, err diff --git a/server/rpc/handle_get_peer_info.go b/server/rpc/handle_get_connected_peer_info.go similarity index 79% rename from server/rpc/handle_get_peer_info.go rename to server/rpc/handle_get_connected_peer_info.go index f0e738e45..64203994c 100644 --- a/server/rpc/handle_get_peer_info.go +++ b/server/rpc/handle_get_connected_peer_info.go @@ -6,14 +6,14 @@ import ( "time" ) -// handleGetPeerInfo implements the getPeerInfo command. -func handleGetPeerInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { +// handleGetConnectedPeerInfo implements the getConnectedPeerInfo command. 
+func handleGetConnectedPeerInfo(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { peers := s.cfg.ConnMgr.ConnectedPeers() syncPeerID := s.cfg.SyncMgr.SyncPeerID() - infos := make([]*rpcmodel.GetPeerInfoResult, 0, len(peers)) + infos := make([]*rpcmodel.GetConnectedPeerInfoResult, 0, len(peers)) for _, p := range peers { statsSnap := p.ToPeer().StatsSnapshot() - info := &rpcmodel.GetPeerInfoResult{ + info := &rpcmodel.GetConnectedPeerInfoResult{ ID: statsSnap.ID, Addr: statsSnap.Addr, Services: fmt.Sprintf("%08d", uint64(statsSnap.Services)), diff --git a/server/rpc/handle_get_peer_addresses.go b/server/rpc/handle_get_peer_addresses.go new file mode 100644 index 000000000..0623afea6 --- /dev/null +++ b/server/rpc/handle_get_peer_addresses.go @@ -0,0 +1,57 @@ +package rpc + +import "github.com/kaspanet/kaspad/rpcmodel" + +// handleGetPeerAddresses handles getPeerAddresses commands. +func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + peersState, err := s.cfg.addressManager.PeersStateForSerialization() + if err != nil { + return nil, err + } + + rpcPeersState := rpcmodel.GetPeerAddressesResult{ + Version: peersState.Version, + Key: peersState.Key, + Addresses: make([]*rpcmodel.GetPeerAddressesKnownAddressResult, len(peersState.Addresses)), + NewBuckets: make(map[string]*rpcmodel.GetPeerAddressesNewBucketResult), + NewBucketFullNodes: rpcmodel.GetPeerAddressesNewBucketResult{}, + TriedBuckets: make(map[string]*rpcmodel.GetPeerAddressesTriedBucketResult), + TriedBucketFullNodes: rpcmodel.GetPeerAddressesTriedBucketResult{}, + } + + for i, addr := range peersState.Addresses { + rpcPeersState.Addresses[i] = &rpcmodel.GetPeerAddressesKnownAddressResult{ + Addr: addr.Addr, + Src: addr.Src, + SubnetworkID: addr.SubnetworkID, + Attempts: addr.Attempts, + TimeStamp: addr.TimeStamp, + LastAttempt: addr.LastAttempt, + LastSuccess: addr.LastSuccess, + } + } + + for subnetworkID, bucket := range peersState.NewBuckets { + rpcPeersState.NewBuckets[subnetworkID] = &rpcmodel.GetPeerAddressesNewBucketResult{} + for i, addr := range bucket { + rpcPeersState.NewBuckets[subnetworkID][i] = addr + } + } + + for i, addr := range peersState.NewBucketFullNodes { + rpcPeersState.NewBucketFullNodes[i] = addr + } + + for subnetworkID, bucket := range peersState.TriedBuckets { + rpcPeersState.TriedBuckets[subnetworkID] = &rpcmodel.GetPeerAddressesTriedBucketResult{} + for i, addr := range bucket { + rpcPeersState.TriedBuckets[subnetworkID][i] = addr + } + } + + for i, addr := range peersState.TriedBucketFullNodes { + rpcPeersState.TriedBucketFullNodes[i] = addr + } + + return rpcPeersState, nil +} diff --git a/server/rpc/rpcserver.go b/server/rpc/rpcserver.go index fe8920881..7e6193a81 100644 --- a/server/rpc/rpcserver.go +++ b/server/rpc/rpcserver.go @@ -12,6 +12,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "github.com/kaspanet/kaspad/addrmgr" "io" "io/ioutil" "math/rand" @@ -83,7 +84,8 @@ var rpcHandlersBeforeInit = map[string]commandHandler{ "getMempoolInfo": handleGetMempoolInfo, "getMempoolEntry": handleGetMempoolEntry, "getNetTotals": handleGetNetTotals, - "getPeerInfo": handleGetPeerInfo, + "getConnectedPeerInfo": handleGetConnectedPeerInfo, + "getPeerAddresses": handleGetPeerAddresses, "getRawMempool": handleGetRawMempool, "getSubnetwork": handleGetSubnetwork, "getTxOut": handleGetTxOut, @@ -783,6 +785,9 @@ type rpcserverConfig struct { // These fields define any optional indexes the RPC server can make use // of to 
provide additional data when queried. AcceptanceIndex *indexers.AcceptanceIndex + + // addressManager defines the address manager for the RPC server to use. + addressManager *addrmgr.AddrManager } // setupRPCListeners returns a slice of listeners that are configured for use @@ -855,6 +860,7 @@ func NewRPCServer( StartupTime: startupTime, ConnMgr: &rpcConnManager{p2pServer}, SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager}, + addressManager: p2pServer.AddrManager, TimeSource: p2pServer.TimeSource, DAGParams: p2pServer.DAGParams, TxMemPool: p2pServer.TxMemPool, diff --git a/server/rpc/rpcserverhelp.go b/server/rpc/rpcserverhelp.go index eb31d888d..dc4544b68 100644 --- a/server/rpc/rpcserverhelp.go +++ b/server/rpc/rpcserverhelp.go @@ -416,29 +416,55 @@ var helpDescsEnUS = map[string]string{ "getNetTotalsResult-totalBytesSent": "Total bytes sent", "getNetTotalsResult-timeMillis": "Number of milliseconds since 1 Jan 1970 GMT", - // GetPeerInfoResult help. - "getPeerInfoResult-id": "A unique node ID", - "getPeerInfoResult-addr": "The ip address and port of the peer", - "getPeerInfoResult-services": "Services bitmask which represents the services supported by the peer", - "getPeerInfoResult-relayTxes": "Peer has requested transactions be relayed to it", - "getPeerInfoResult-lastSend": "Time the last message was received in seconds since 1 Jan 1970 GMT", - "getPeerInfoResult-lastRecv": "Time the last message was sent in seconds since 1 Jan 1970 GMT", - "getPeerInfoResult-bytesSent": "Total bytes sent", - "getPeerInfoResult-bytesRecv": "Total bytes received", - "getPeerInfoResult-connTime": "Time the connection was made in seconds since 1 Jan 1970 GMT", - "getPeerInfoResult-timeOffset": "The time offset of the peer", - "getPeerInfoResult-pingTime": "Number of microseconds the last ping took", - "getPeerInfoResult-pingWait": "Number of microseconds a queued ping has been waiting for a response", - "getPeerInfoResult-version": "The protocol version of the peer", - "getPeerInfoResult-subVer": "The user agent of the peer", - "getPeerInfoResult-inbound": "Whether or not the peer is an inbound connection", - "getPeerInfoResult-selectedTip": "The selected tip of the peer", - "getPeerInfoResult-banScore": "The ban score", - "getPeerInfoResult-feeFilter": "The requested minimum fee a transaction must have to be announced to the peer", - "getPeerInfoResult-syncNode": "Whether or not the peer is the sync peer", + // GetConnectedPeerInfoResult help. 
+ "getConnectedPeerInfoResult-id": "A unique node ID", + "getConnectedPeerInfoResult-addr": "The ip address and port of the peer", + "getConnectedPeerInfoResult-services": "Services bitmask which represents the services supported by the peer", + "getConnectedPeerInfoResult-relayTxes": "Peer has requested transactions be relayed to it", + "getConnectedPeerInfoResult-lastSend": "Time the last message was received in seconds since 1 Jan 1970 GMT", + "getConnectedPeerInfoResult-lastRecv": "Time the last message was sent in seconds since 1 Jan 1970 GMT", + "getConnectedPeerInfoResult-bytesSent": "Total bytes sent", + "getConnectedPeerInfoResult-bytesRecv": "Total bytes received", + "getConnectedPeerInfoResult-connTime": "Time the connection was made in seconds since 1 Jan 1970 GMT", + "getConnectedPeerInfoResult-timeOffset": "The time offset of the peer", + "getConnectedPeerInfoResult-pingTime": "Number of microseconds the last ping took", + "getConnectedPeerInfoResult-pingWait": "Number of microseconds a queued ping has been waiting for a response", + "getConnectedPeerInfoResult-version": "The protocol version of the peer", + "getConnectedPeerInfoResult-subVer": "The user agent of the peer", + "getConnectedPeerInfoResult-inbound": "Whether or not the peer is an inbound connection", + "getConnectedPeerInfoResult-selectedTip": "The selected tip of the peer", + "getConnectedPeerInfoResult-banScore": "The ban score", + "getConnectedPeerInfoResult-feeFilter": "The requested minimum fee a transaction must have to be announced to the peer", + "getConnectedPeerInfoResult-syncNode": "Whether or not the peer is the sync peer", - // GetPeerInfoCmd help. - "getPeerInfo--synopsis": "Returns data about each connected network peer as an array of json objects.", + // GetConnectedPeerInfoCmd help. + "getConnectedPeerInfo--synopsis": "Returns data about each connected network peer as an array of json objects.", + + // GetPeerAddressesResult help. 
+ "getPeerAddressesResult-version": "Peers state serialization version", + "getPeerAddressesResult-key": "Address manager's key for randomness purposes.", + "getPeerAddressesResult-addresses": "The node's known addresses", + "getPeerAddressesResult-newBuckets": "Peers state subnetwork new buckets", + "getPeerAddressesResult-newBuckets--desc": "New buckets keyed by subnetwork ID", + "getPeerAddressesResult-newBuckets--key": "subnetworkId", + "getPeerAddressesResult-newBuckets--value": "New bucket", + "getPeerAddressesResult-newBucketFullNodes": "Peers state full nodes new bucket", + "getPeerAddressesResult-triedBuckets": "Peers state subnetwork tried buckets", + "getPeerAddressesResult-triedBuckets--desc": "Tried buckets keyed by subnetwork ID", + "getPeerAddressesResult-triedBuckets--key": "subnetworkId", + "getPeerAddressesResult-triedBuckets--value": "Tried bucket", + "getPeerAddressesResult-triedBucketFullNodes": "Peers state tried full nodes bucket", + + "getPeerAddressesKnownAddressResult-addr": "Address", + "getPeerAddressesKnownAddressResult-src": "Address of the peer that handed the address", + "getPeerAddressesKnownAddressResult-subnetworkId": "Address subnetwork ID", + "getPeerAddressesKnownAddressResult-attempts": "Number of attempts to connect to the address", + "getPeerAddressesKnownAddressResult-timeStamp": "Time the address was added", + "getPeerAddressesKnownAddressResult-lastAttempt": "Last attempt to connect to the address", + "getPeerAddressesKnownAddressResult-lastSuccess": "Last successful attempt to connect to the address", + + // GetPeerAddressesCmd help. + "getPeerAddresses--synopsis": "Returns the peers state.", // GetRawMempoolVerboseResult help. "getRawMempoolVerboseResult-size": "Transaction size in bytes", @@ -488,7 +514,7 @@ var helpDescsEnUS = map[string]string{ // PingCmd help. "ping--synopsis": "Queues a ping to be sent to each connected peer.\n" + - "Ping times are provided by getPeerInfo via the pingtime and pingwait fields.", + "Ping times are provided by getConnectedPeerInfo via the pingtime and pingwait fields.", // RemoveManualNodeCmd help. 
"removeManualNode--synopsis": "Removes a peer from the manual nodes list", @@ -616,7 +642,8 @@ var rpcResultTypes = map[string][]interface{}{ "getMempoolInfo": {(*rpcmodel.GetMempoolInfoResult)(nil)}, "getMempoolEntry": {(*rpcmodel.GetMempoolEntryResult)(nil)}, "getNetTotals": {(*rpcmodel.GetNetTotalsResult)(nil)}, - "getPeerInfo": {(*[]rpcmodel.GetPeerInfoResult)(nil)}, + "getConnectedPeerInfo": {(*[]rpcmodel.GetConnectedPeerInfoResult)(nil)}, + "getPeerAddresses": {(*[]rpcmodel.GetPeerAddressesResult)(nil)}, "getRawMempool": {(*[]string)(nil), (*rpcmodel.GetRawMempoolVerboseResult)(nil)}, "getSubnetwork": {(*rpcmodel.GetSubnetworkResult)(nil)}, "getTxOut": {(*rpcmodel.GetTxOutResult)(nil)}, From 1e6458973b45bb7a166f6e8158d59b8472a24c5e Mon Sep 17 00:00:00 2001 From: Svarog Date: Wed, 17 Jun 2020 14:18:00 +0300 Subject: [PATCH 68/77] [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) --- peer/peer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/peer/peer_test.go b/peer/peer_test.go index fbc8942c0..192130dcb 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -585,7 +585,7 @@ func TestOutboundPeer(t *testing.T) { if _, err := p2.PushAddrMsg(addrs, nil); err != nil { t.Fatalf("PushAddrMsg: unexpected err %v\n", err) } - if err := p2.PushGetBlockInvsMsg(nil, &daghash.Hash{}); err != nil { + if err := p2.PushGetBlockInvsMsg(&daghash.Hash{}, &daghash.Hash{}); err != nil { t.Fatalf("PushGetBlockInvsMsg: unexpected err %v\n", err) } From 56e807b6634fc755e80094fa9c7aba50313d3334 Mon Sep 17 00:00:00 2001 From: Svarog Date: Mon, 22 Jun 2020 14:20:12 +0300 Subject: [PATCH 69/77] [NOD-999] Set TargetOutbound=0 to prevent rogue connectionRequests except the one requested (#771) --- connmgr/connmanager_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/connmgr/connmanager_test.go b/connmgr/connmanager_test.go index f7d28ccff..4cf88618b 100644 --- a/connmgr/connmanager_test.go +++ b/connmgr/connmanager_test.go @@ -6,17 +6,18 @@ package connmgr import ( "fmt" - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/dbaccess" - "github.com/pkg/errors" "io" "io/ioutil" "net" "sync/atomic" "testing" "time" + + "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/config" + "github.com/kaspanet/kaspad/dagconfig" + "github.com/kaspanet/kaspad/dbaccess" + "github.com/pkg/errors" ) func init() { @@ -575,7 +576,7 @@ func TestMaxRetryDuration(t *testing.T) { connected := make(chan *ConnReq) cmgr, err := New(&Config{ RetryDuration: time.Millisecond, - TargetOutbound: 1, + TargetOutbound: 0, Dial: timedDialer, OnConnection: func(c *ConnReq, conn net.Conn) { connected <- c From 895f67a8d4a31e5782ee1b5784bc7ca8295f7a0d Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 22 Jun 2020 17:14:59 +0300 Subject: [PATCH 70/77] [NOD-1035] Use reachability to check finality (#763) * [NOD-1034] Use reachability to check finality * [NOD-1034] Add comments and rename variables * [NOD-1034] Fix comments * [NOD-1034] Rename checkFinalityRules->checkFinalityViolation * [NOD-1034] Change isAncestorOf to be exclusive * [NOD-1034] Make isAncestorOf exclusive and also more explicit, and add TestReachabilityTreeNodeIsAncestorOf --- blockdag/dag.go | 47 ++++++++++++++----- blockdag/reachability.go | 6 ++- blockdag/reachability_test.go | 87 ++++++++++++++++++++++++++++++++++- blockdag/validate.go | 2 +- 4 files changed, 128 insertions(+), 14 deletions(-) 
diff --git a/blockdag/dag.go b/blockdag/dag.go index 4493c5ae4..ab3022b9d 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -566,7 +566,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode, } } - if err := dag.checkFinalityRules(node); err != nil { + if err := dag.checkFinalityViolation(node); err != nil { return nil, err } @@ -816,20 +816,45 @@ func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash { return dag.lastFinalityPoint.hash } -// checkFinalityRules checks the new block does not violate the finality rules -// specifically - the new block selectedParent chain should contain the old finality point -func (dag *BlockDAG) checkFinalityRules(newNode *blockNode) error { +// isInSelectedParentChain returns whether aNode is in the selected parent chain of bNode. +func (dag *BlockDAG) isInSelectedParentChain(aNode, bNode *blockNode) (bool, error) { + aTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(aNode) + if err != nil { + return false, err + } + + bTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(bNode) + if err != nil { + return false, err + } + + return aTreeNode.interval.isAncestorOf(bTreeNode.interval), nil +} + +// checkFinalityViolation checks the new block does not violate the finality rules +// specifically - the new block selectedParent chain should contain the old finality point. +func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error { // the genesis block can not violate finality rules if newNode.isGenesis() { return nil } - for currentNode := newNode; currentNode != dag.lastFinalityPoint; currentNode = currentNode.selectedParent { - // If we went past dag's last finality point without encountering it - - // the new block has violated finality. - if currentNode.blueScore <= dag.lastFinalityPoint.blueScore { - return ruleError(ErrFinality, "The last finality point is not in the selected chain of this block") - } + // Because newNode doesn't have reachability data we + // need to check if the last finality point is in the + // selected parent chain of newNode.selectedParent, so + // we explicitly check if newNode.selectedParent is + // the finality point. + if dag.lastFinalityPoint == newNode.selectedParent { + return nil + } + + isInSelectedChain, err := dag.isInSelectedParentChain(dag.lastFinalityPoint, newNode.selectedParent) + if err != nil { + return err + } + + if !isInSelectedChain { + return ruleError(ErrFinality, "the last finality point is not in the selected parent chain of this block") } return nil } @@ -894,7 +919,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { // IsKnownFinalizedBlock returns whether the block is below the finality point. // IsKnownFinalizedBlock might be false-negative because node finality status is // updated in a separate goroutine. To get a definite answer if a block -// is finalized or not, use dag.checkFinalityRules. +// is finalized or not, use dag.checkFinalityViolation. func (dag *BlockDAG) IsKnownFinalizedBlock(blockHash *daghash.Hash) bool { node, ok := dag.index.LookupNode(blockHash) return ok && node.isFinalized diff --git a/blockdag/reachability.go b/blockdag/reachability.go index 8f785c0d4..e16544826 100644 --- a/blockdag/reachability.go +++ b/blockdag/reachability.go @@ -155,7 +155,11 @@ func exponentialFractions(sizes []uint64) []float64 { // property of reachability intervals that intervals are either completely disjoint, // or one strictly contains the other. 
func (ri *reachabilityInterval) isAncestorOf(other *reachabilityInterval) bool { - return ri.start <= other.end && other.end <= ri.end + // An interval is not an ancestor of itself. + if ri.start == other.start && ri.end == other.end { + return false + } + return ri.start <= other.start && other.end <= ri.end } // String returns a string representation of the interval. diff --git a/blockdag/reachability_test.go b/blockdag/reachability_test.go index c2d8c1e84..e7ab60219 100644 --- a/blockdag/reachability_test.go +++ b/blockdag/reachability_test.go @@ -57,7 +57,7 @@ func TestAddChild(t *testing.T) { // Expect all nodes to be descendant nodes of root currentNode := currentTip - for currentNode != nil { + for currentNode != root { if !root.isAncestorOf(currentNode) { t.Fatalf("TestAddChild: currentNode is not a descendant of root") } @@ -118,6 +118,91 @@ func TestAddChild(t *testing.T) { } } +func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { + root := newReachabilityTreeNode(&blockNode{}) + currentTip := root + const numberOfDescendants = 6 + descendants := make([]*reachabilityTreeNode, numberOfDescendants) + for i := 0; i < numberOfDescendants; i++ { + node := newReachabilityTreeNode(&blockNode{}) + _, err := currentTip.addChild(node) + if err != nil { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err) + } + descendants[i] = node + currentTip = node + } + + // Expect all descendants to be in the future of root + for _, node := range descendants { + if !root.isAncestorOf(node) { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: node is not a descendant of root") + } + } + + if root.isAncestorOf(root) { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is not expected to be a descendant of root") + } +} + +func TestIntervalIsAncestorOf(t *testing.T) { + tests := []struct { + name string + this, other *reachabilityInterval + isThisAncestorOfOther bool + }{ + { + name: "this == other", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 100), + isThisAncestorOfOther: false, + }, + { + name: "this.start == other.start && this.end < other.end", + this: newReachabilityInterval(10, 90), + other: newReachabilityInterval(10, 100), + isThisAncestorOfOther: false, + }, + { + name: "this.start == other.start && this.end > other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 90), + isThisAncestorOfOther: true, + }, + { + name: "this.start > other.start && this.end == other.end", + this: newReachabilityInterval(20, 100), + other: newReachabilityInterval(10, 100), + isThisAncestorOfOther: false, + }, + { + name: "this.start < other.start && this.end == other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 100), + isThisAncestorOfOther: true, + }, + { + name: "this.start > other.start && this.end < other.end", + this: newReachabilityInterval(20, 90), + other: newReachabilityInterval(10, 100), + isThisAncestorOfOther: false, + }, + { + name: "this.start < other.start && this.end > other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 90), + isThisAncestorOfOther: true, + }, + } + + for _, test := range tests { + if isAncestorOf := test.this.isAncestorOf(test.other); isAncestorOf != test.isThisAncestorOfOther { + t.Errorf("test.this.isAncestorOf(test.other) is expected to be %t but got %t", + test.isThisAncestorOfOther, isAncestorOf) + } + } +} + func TestSplitFraction(t *testing.T) { tests := []struct { interval *reachabilityInterval 
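Note: TestSplitFraction, whose opening appears in the trailing context above, covers the proportional interval-splitting primitive that interval allocation builds on. A rough sketch of that rule, under the assumptions that an interval's size is end - start + 1 and the left share is rounded up; the helper below is illustrative, not kaspad's exact signature:

package main

import (
	"fmt"
	"math"
)

// splitFraction splits [start, end] into two adjacent intervals, giving the
// left one roughly `fraction` of the total size, rounded up. An empty part
// comes out as [x+1, x], i.e. size zero.
func splitFraction(start, end uint64, fraction float64) (leftStart, leftEnd, rightStart, rightEnd uint64) {
	size := end - start + 1
	allocated := uint64(math.Ceil(float64(size) * fraction))
	return start, start + allocated - 1, start + allocated, end
}

func main() {
	ls, le, rs, re := splitFraction(1, 100, 0.5)
	fmt.Printf("left=[%d,%d] right=[%d,%d]\n", ls, le, rs, re) // left=[1,50] right=[51,100]
}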
diff --git a/blockdag/validate.go b/blockdag/validate.go index ff6326924..7103986b1 100644 --- a/blockdag/validate.go +++ b/blockdag/validate.go @@ -698,7 +698,7 @@ func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents bloc for parentA := range parents { // isFinalized might be false-negative because node finality status is // updated in a separate goroutine. This is why later the block is - // checked more thoroughly on the finality rules in dag.checkFinalityRules. + // checked more thoroughly on the finality rules in dag.checkFinalityViolation. if parentA.isFinalized { return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+ "parent of block %s", parentA.hash, blockHeader.BlockHash())) From a86255ba51f37c1ac155bab5f6ddbaa66afdedd0 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 25 Jun 2020 18:08:58 +0300 Subject: [PATCH 71/77] [NOD-1088] Rename RejectReasion to RejectReason (#775) --- rpcmodel/rpc_results.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpcmodel/rpc_results.go b/rpcmodel/rpc_results.go index 6a5d38887..f70b98ccc 100644 --- a/rpcmodel/rpc_results.go +++ b/rpcmodel/rpc_results.go @@ -162,8 +162,8 @@ type GetBlockTemplateResult struct { NonceRange string `json:"nonceRange,omitempty"` // Block proposal from BIP 0023. - Capabilities []string `json:"capabilities,omitempty"` - RejectReasion string `json:"rejectReason,omitempty"` + Capabilities []string `json:"capabilities,omitempty"` + RejectReason string `json:"rejectReason,omitempty"` } // GetMempoolEntryResult models the data returned from the getMempoolEntry From 57b1653383ee20f9cddeaac0d88de1a8d7d389c4 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Sun, 28 Jun 2020 14:27:01 +0300 Subject: [PATCH 72/77] [NOD-1063] Optimize deep reachability tree insertions (#773) * [NOD-1055] Give higher priority for requesting missing ancestors when sending a getdata message (#767) * [NOD-1063] Remove the remainingInterval field. * [NOD-1063] Add helper functions to reachabilityTreeNode. * [NOD-1063] Add reachabilityReindexRoot. * [NOD-1063] Start implementing findNextReachabilityReindexRoot. * [NOD-1063] Implement findCommonAncestor. * [NOD-1063] Implement findReachabilityTreeAncestorInChildren. * [NOD-1063] Add reachabilityReindexWindow. * [NOD-1063] Fix findReachabilityTreeAncestorInChildren. * [NOD-1063] Remove BlockDAG reference in findReachabilityTreeAncestorInChildren. * [NOD-1063] Extract updateReachabilityReindexRoot to a separate function. * [NOD-1063] Add reachabilityReindexSlack. * [NOD-1063] Implement splitReindexRootChildrenAroundChosen. * [NOD-1063] Implement calcReachabilityTreeNodeSizes. * [NOD-1063] Implement propagateChildIntervals. * [NOD-1063] Extract tightenReachabilityTreeIntervalsBeforeChosenReindexRootChild and tightenReachabilityTreeIntervalsAfterChosenReindexRootChild to separate functions. * [NOD-1063] Implement expandReachabilityTreeIntervalInChosenReindexRootChild. * [NOD-1063] Finished implementing concentrateReachabilityTreeIntervalAroundReindexRootChild. * [NOD-1063] Begin implementing reindexIntervalsBeforeReindexRoot. * [NOD-1063] Implement top-level logic of reindexIntervalsBeforeReindexRoot. * [NOD-1063] Implement reclaimIntervalBeforeChosenChild. * [NOD-1063] Add a debug log for reindexIntervalsBeforeReindexRoot. * [NOD-1063] Rename reindexIntervalsBeforeReindexRoot to reindexIntervalsEarlierThanReindexRoot. * [NOD-1063] Implement reclaimIntervalAfterChosenChild. 
* [NOD-1063] Add a debug log for updateReachabilityReindexRoot. * [NOD-1063] Convert modifiedTreeNodes from slices to sets. * [NOD-1063] Fix findCommonAncestor. * [NOD-1063] Fix reindexIntervalsEarlierThanReindexRoot.` * [NOD-1063] Remove redundant nil conditions. * [NOD-1063] Make map[*reachabilityTreeNode]struct{} into a type alias with a copyAllFrom method. * [NOD-1063] Remove setInterval. * [NOD-1063] Create a new struct to hold reachability stuff called reachabilityTree. * [NOD-1063] Rename functions under reachabilityTree. * [NOD-1063] Move reachabilityStore into reachabilityTree. * [NOD-1063] Move the rest of the functions in reachability.go into the reachabilityTree struct. * [NOD-1063] Update newReachabilityTree to take an instance of reachabilityStore. * [NOD-1063] Fix merge errors. * [NOD-1063] Fix merge errors. * [NOD-1063] Pass a reference to the dag into reachabilityTree. * [NOD-1063] Use Wrapf instead of Errorf. * [NOD-1063] Merge assignments. * [NOD-1063] Disambiguate a varaible name. * [NOD-1063] Add a test case for intervalBefore. * [NOD-1063] Simplify splitChildrenAroundChosenChild. * [NOD-1063] Fold temporary variables into newReachabilityInterval. * [NOD-1063] Fold more temporary variables into newReachabilityInterval. * [NOD-1063] Fix a bug in expandIntervalInReindexRootChosenChild. * [NOD-1063] Remove blockNode from futureCoveringBlock. * [NOD-1063] Get rid of futureCoveringBlock. * [NOD-1063] Use findIndex directly in findAncestorAmongChildren. * [NOD-1063] Make findIndex a bit nicer to use. Also rename it to findAncestorIndexOfNode. * [NOD-1063] Rename childIntervalAllocationRange to intervalRangeForChildAllocation. * [NOD-1063] Optimize findCommonAncestor. * [NOD-1063] In reindexIntervalsBeforeChosenChild, use chosenChild.interval.start - 1 instead of childrenBeforeChosen[len(childrenBeforeChosen)-1].interval.end + 1. * [NOD-1063] Rename reindexIntervalsBeforeChosenChild to reindexIntervalsBeforeNode. * [NOD-1063] Add a comment explain what "the chosen child" is. * [NOD-1063] In concentrateIntervalAroundReindexRootChosenChild, rename modifiedTreeNodes to allModifiedTreeNodes. * [NOD-1063] Extract propagateIntervals to a function. * [NOD-1063] Extract interval "contains" logic to a separate function. * [NOD-1063] Simplify "looping up" logic in reclaimIntervalXXXChosenChild. * [NOD-1063] Add comments to reclaimIntervalXXXChosenChild. * [NOD-1063] Rename copyAllFrom to addAll. * [NOD-1063] Rename reachabilityStore (the variable) to just store. * [NOD-1063] Fix an error message. * [NOD-1063] Reword a comment. * [NOD-1063] Don't return -1 from findAncestorIndexOfNode. * [NOD-1063] Extract slackReachabilityIntervalForReclaiming to a constant. * [NOD-1063] Add a missing condition. * [NOD-1063] Call isAncestorOf directly in insertNode. * [NOD-1063] Rename chosenReindexRootChild to reindexRootChosenChild. * [NOD-1063] Rename treeNodeSet to orderedTreeNodeSet. * [NOD-1063] Add a disclaimer to orderedTreeNodeSet. * [NOD-1063] Implement StoreReachabilityReindexRoot and FetchReachabilityReindexRoot. * [NOD-1063] Move storing the reindex root to within reachabilityTree. * [NOD-1063] Remove isAncestorOf from reachabilityInterval. * [NOD-1063] Add a comment about graph theory conventions. * [NOD-1063] Fix tests. * [NOD-1063] Change inclusion in isAncestorOf functions. * [NOD-1063] Rename a test. * [NOD-1063] Implement TestIsInFuture. * [NOD-1063] Fix error messages in TestIsInFuture. * [NOD-1063] Fix error messages in TestIsInFuture. 
* [NOD-1063] Rename isInSelectedParentChain to isInSelectedParentChainOf. * [NOD-1063] Rename isInFuture to isInPast. * [NOD-1063] Expand on a comment. * [NOD-1063] Rename modifiedTreeNodes. * [NOD-1063] Implement test: TestReindexIntervalsEarlierThanReindexRoot. * [NOD-1063] Implement test: TestUpdateReindexRoot. * [NOD-1063] Explain a check. * [NOD-1063] Use a method instead of calling reachabilityStore.loaded directly. * [NOD-1063] Lowercasified an error message. * [NOD-1063] Fix failing test. Co-authored-by: Ori Newman --- blockdag/dag.go | 44 +- blockdag/dagio.go | 5 +- blockdag/ghostdag.go | 6 +- blockdag/ghostdag_test.go | 4 +- blockdag/reachability.go | 886 +++++++++++++++++++++++++++++----- blockdag/reachability_test.go | 539 +++++++++++++++------ blockdag/reachabilitystore.go | 56 +-- blockdag/validate.go | 2 +- dbaccess/reachability.go | 24 + 9 files changed, 1226 insertions(+), 340 deletions(-) diff --git a/blockdag/dag.go b/blockdag/dag.go index ab3022b9d..9280d254d 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -151,9 +151,10 @@ type BlockDAG struct { lastFinalityPoint *blockNode - utxoDiffStore *utxoDiffStore - reachabilityStore *reachabilityStore - multisetStore *multisetStore + utxoDiffStore *utxoDiffStore + multisetStore *multisetStore + + reachabilityTree *reachabilityTree recentBlockProcessingTimestamps []time.Time startTime time.Time @@ -700,7 +701,7 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT return err } - err = dag.reachabilityStore.flushToDB(dbTx) + err = dag.reachabilityTree.storeState(dbTx) if err != nil { return err } @@ -760,7 +761,7 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT dag.index.clearDirtyEntries() dag.utxoDiffStore.clearDirtyEntries() dag.utxoDiffStore.clearOldEntries() - dag.reachabilityStore.clearDirtyEntries() + dag.reachabilityTree.store.clearDirtyEntries() dag.multisetStore.clearNewEntries() return nil @@ -816,19 +817,14 @@ func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash { return dag.lastFinalityPoint.hash } -// isInSelectedParentChain returns whether aNode is in the selected parent chain of bNode. -func (dag *BlockDAG) isInSelectedParentChain(aNode, bNode *blockNode) (bool, error) { - aTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(aNode) - if err != nil { - return false, err +// isInSelectedParentChainOf returns whether `node` is in the selected parent chain of `other`. +func (dag *BlockDAG) isInSelectedParentChainOf(node *blockNode, other *blockNode) (bool, error) { + // By definition, a node is not in the selected parent chain of itself. 
+ if node == other { + return false, nil } - bTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(bNode) - if err != nil { - return false, err - } - - return aTreeNode.interval.isAncestorOf(bTreeNode.interval), nil + return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other) } // checkFinalityViolation checks the new block does not violate the finality rules @@ -848,7 +844,7 @@ func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error { return nil } - isInSelectedChain, err := dag.isInSelectedParentChain(dag.lastFinalityPoint, newNode.selectedParent) + isInSelectedChain, err := dag.isInSelectedParentChainOf(dag.lastFinalityPoint, newNode.selectedParent) if err != nil { return err } @@ -995,10 +991,10 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockPastUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) ( virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) { - // Add the block to the reachability structures - err = dag.updateReachability(node, selectedParentAnticone) + // Add the block to the reachability tree + err = dag.reachabilityTree.addBlock(node, selectedParentAnticone) if err != nil { - return nil, nil, errors.Wrap(err, "failed updating reachability") + return nil, nil, errors.Wrap(err, "failed adding block to the reachability tree") } dag.multisetStore.setMultiset(node, newBlockMultiset) @@ -1785,7 +1781,7 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries continue } visited.add(current) - isCurrentAncestorOfLowNode, err := dag.isAncestorOf(current, lowNode) + isCurrentAncestorOfLowNode, err := dag.isInPast(current, lowNode) if err != nil { return nil, err } @@ -1811,6 +1807,10 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries return nodes, nil } +func (dag *BlockDAG) isInPast(this *blockNode, other *blockNode) (bool, error) { + return dag.reachabilityTree.isInPast(this, other) +} + // AntiPastHashesBetween returns the hashes of the blocks between the // lowHash's antiPast and highHash's antiPast, or up to the provided // max number of block hashes. @@ -2064,8 +2064,8 @@ func New(config *Config) (*BlockDAG, error) { dag.virtual = newVirtualBlock(dag, nil) dag.utxoDiffStore = newUTXODiffStore(dag) - dag.reachabilityStore = newReachabilityStore(dag) dag.multisetStore = newMultisetStore(dag) + dag.reachabilityTree = newReachabilityTree(dag) // Initialize the DAG state from the passed database. 
When the db // does not yet contain any DAG state, both it and the DAG state diff --git a/blockdag/dagio.go b/blockdag/dagio.go index 32c01877e..9300ecaf9 100644 --- a/blockdag/dagio.go +++ b/blockdag/dagio.go @@ -209,7 +209,7 @@ func (dag *BlockDAG) initDAGState() error { } log.Debugf("Loading reachability data...") - err = dag.reachabilityStore.init(dbaccess.NoTx()) + err = dag.reachabilityTree.init(dbaccess.NoTx()) if err != nil { return err } @@ -236,7 +236,8 @@ func (dag *BlockDAG) initDAGState() error { var ok bool dag.lastFinalityPoint, ok = dag.index.LookupNode(dagState.LastFinalityPoint) if !ok { - return errors.Errorf("block %s does not exist in the DAG", dagState.LastFinalityPoint) + return errors.Errorf("finality point block %s "+ + "does not exist in the DAG", dagState.LastFinalityPoint) } dag.finalizeNodesBelowFinalityPoint(false) diff --git a/blockdag/ghostdag.go b/blockdag/ghostdag.go index 6f8f488fc..8c2a5e240 100644 --- a/blockdag/ghostdag.go +++ b/blockdag/ghostdag.go @@ -57,7 +57,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo // newNode is always in the future of blueCandidate, so there's // no point in checking it. if chainBlock != newNode { - if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil { + if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil { return nil, err } else if isAncestorOfBlueCandidate { break @@ -66,7 +66,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo for _, block := range chainBlock.blues { // Skip blocks that exist in the past of blueCandidate. - if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil { + if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil { return nil, err } else if isAncestorOfBlueCandidate { continue @@ -148,7 +148,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro if anticoneSet.contains(parent) || selectedParentPast.contains(parent) { continue } - isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent) + isAncestorOfSelectedParent, err := dag.isInPast(parent, node.selectedParent) if err != nil { return nil, err } diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 1556db239..29686d99c 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -349,7 +349,7 @@ func TestGHOSTDAGErrors(t *testing.T) { block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2) // Clear the reachability store - dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{} + dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{} dbTx, err := dbaccess.NewTx() if err != nil { @@ -377,7 +377,7 @@ func TestGHOSTDAGErrors(t *testing.T) { if err == nil { t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded") } - expectedErrSubstring := "Couldn't find reachability data" + expectedErrSubstring := "couldn't find reachability data" if !strings.Contains(err.Error(), expectedErrSubstring) { t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. 
"+ "Want: %s, got: %s", expectedErrSubstring, err) diff --git a/blockdag/reachability.go b/blockdag/reachability.go index e16544826..fa5a1ec2b 100644 --- a/blockdag/reachability.go +++ b/blockdag/reachability.go @@ -2,12 +2,52 @@ package blockdag import ( "fmt" + "github.com/kaspanet/kaspad/dbaccess" "github.com/pkg/errors" "math" "strings" "time" ) +var ( + // reachabilityReindexWindow is the target window size for reachability + // reindexes. Note that this is not a constant for testing purposes. + reachabilityReindexWindow uint64 = 200 + + // reachabilityReindexSlack is the slack interval given to reachability + // tree nodes not in the selected parent chain. Note that this is not + // a constant for testing purposes. + reachabilityReindexSlack uint64 = 1 << 12 + + // slackReachabilityIntervalForReclaiming is the slack interval to + // reclaim during reachability reindexes earlier than the reindex root. + // See reclaimIntervalBeforeChosenChild for further details. Note that + // this is not a constant for testing purposes. + slackReachabilityIntervalForReclaiming uint64 = 1 +) + +// modifiedTreeNodes are a set of reachabilityTreeNodes that's bubbled up +// from any function that modifies them, so that the original caller may +// update the database accordingly. This is a set rather than a slice due +// to frequent duplicate treeNodes between operations. +type modifiedTreeNodes map[*reachabilityTreeNode]struct{} + +func newModifiedTreeNodes(nodes ...*reachabilityTreeNode) modifiedTreeNodes { + modifiedNodes := make(modifiedTreeNodes) + for _, node := range nodes { + modifiedNodes[node] = struct{}{} + } + return modifiedNodes +} + +// addAll adds all the reachabilityTreeNodes in `other` +// into `mtn`. Note that `other` is not affected. +func (mtn modifiedTreeNodes) addAll(other modifiedTreeNodes) { + for node := range other { + mtn[node] = struct{}{} + } +} + // reachabilityInterval represents an interval to be used within the // tree reachability algorithm. See reachabilityTreeNode for further // details. @@ -150,15 +190,8 @@ func exponentialFractions(sizes []uint64) []float64 { return fractions } -// isAncestorOf checks if this interval's node is a reachability tree -// ancestor of the other interval's node. The condition below is relying on the -// property of reachability intervals that intervals are either completely disjoint, -// or one strictly contains the other. -func (ri *reachabilityInterval) isAncestorOf(other *reachabilityInterval) bool { - // An interval is not an ancestor of itself. - if ri.start == other.start && ri.end == other.end { - return false - } +// contains returns true if ri contains other. +func (ri *reachabilityInterval) contains(other *reachabilityInterval) bool { return ri.start <= other.start && other.end <= ri.end } @@ -191,33 +224,78 @@ type reachabilityTreeNode struct { // interval is the index interval containing all intervals of // blocks in this node's subtree interval *reachabilityInterval - - // remainingInterval is the not-yet allocated interval (within - // this node's interval) awaiting new children - remainingInterval *reachabilityInterval } func newReachabilityTreeNode(blockNode *blockNode) *reachabilityTreeNode { // Please see the comment above reachabilityTreeNode to understand why // we use these initial values. 
interval := newReachabilityInterval(1, math.MaxUint64-1) - // We subtract 1 from the end of the remaining interval to prevent the node from allocating + return &reachabilityTreeNode{blockNode: blockNode, interval: interval} +} + +func (rtn *reachabilityTreeNode) intervalRangeForChildAllocation() *reachabilityInterval { + // We subtract 1 from the end of the range to prevent the node from allocating // the entire interval to its child, so its interval would *strictly* contain the interval of its child. - remainingInterval := newReachabilityInterval(interval.start, interval.end-1) - return &reachabilityTreeNode{blockNode: blockNode, interval: interval, remainingInterval: remainingInterval} + return newReachabilityInterval(rtn.interval.start, rtn.interval.end-1) +} + +func (rtn *reachabilityTreeNode) remainingIntervalBefore() *reachabilityInterval { + childRange := rtn.intervalRangeForChildAllocation() + if len(rtn.children) == 0 { + return childRange + } + return newReachabilityInterval(childRange.start, rtn.children[0].interval.start-1) +} + +func (rtn *reachabilityTreeNode) remainingIntervalAfter() *reachabilityInterval { + childRange := rtn.intervalRangeForChildAllocation() + if len(rtn.children) == 0 { + return childRange + } + return newReachabilityInterval(rtn.children[len(rtn.children)-1].interval.end+1, childRange.end) +} + +func (rtn *reachabilityTreeNode) hasSlackIntervalBefore() bool { + return rtn.remainingIntervalBefore().size() > 0 +} + +func (rtn *reachabilityTreeNode) hasSlackIntervalAfter() bool { + return rtn.remainingIntervalAfter().size() > 0 } // addChild adds child to this tree node. If this node has no // remaining interval to allocate, a reindexing is triggered. // This method returns a list of reachabilityTreeNodes modified // by it. -func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode) ([]*reachabilityTreeNode, error) { +func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + remaining := rtn.remainingIntervalAfter() + // Set the parent-child relationship rtn.children = append(rtn.children, child) child.parent = rtn + // Handle rtn not being a descendant of the reindex root. + // Note that we check rtn here instead of child because + // at this point we don't yet know child's interval. + if !reindexRoot.isAncestorOf(rtn) { + reindexStartTime := time.Now() + modifiedNodes, err := rtn.reindexIntervalsEarlierThanReindexRoot(reindexRoot) + if err != nil { + return nil, err + } + reindexTimeElapsed := time.Since(reindexStartTime) + log.Debugf("Reachability reindex triggered for "+ + "block %s. This block is not a child of the current "+ + "reindex root %s. 
Modified %d tree nodes and took %dms.",
+			rtn.blockNode.hash, reindexRoot.blockNode.hash,
+			len(modifiedNodes), reindexTimeElapsed.Milliseconds())
+		return modifiedNodes, nil
+	}
+
 	// No allocation space left -- reindex
-	if rtn.remainingInterval.size() == 0 {
+	if remaining.size() == 0 {
 		reindexStartTime := time.Now()
 		modifiedNodes, err := rtn.reindexIntervals()
 		if err != nil {
@@ -231,23 +309,12 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode) ([]*reach
 	}
 
 	// Allocate from the remaining space
-	allocated, remaining, err := rtn.remainingInterval.splitInHalf()
+	allocated, _, err := remaining.splitInHalf()
 	if err != nil {
 		return nil, err
 	}
-	child.setInterval(allocated)
-	rtn.remainingInterval = remaining
-	return []*reachabilityTreeNode{rtn, child}, nil
-}
-
-// setInterval sets the reachability interval for this node.
-func (rtn *reachabilityTreeNode) setInterval(interval *reachabilityInterval) {
-	rtn.interval = interval
-
-	// Reserve a single interval index for the current node. This
-	// is necessary to ensure that ancestor intervals are strictly
-	// supersets of any descendant intervals and not equal
-	rtn.remainingInterval = newReachabilityInterval(interval.start, interval.end-1)
+	child.interval = allocated
+	return newModifiedTreeNodes(rtn, child), nil
 }
 
 // reindexIntervals traverses the reachability subtree that's
@@ -257,7 +324,7 @@ func (rtn *reachabilityTreeNode) setInterval(interval *reachabilityInterval) {
 // tree until it finds a node with a subtreeSize that's greater than
 // its interval size. See propagateInterval for further details.
 // This method returns a list of reachabilityTreeNodes modified by it.
-func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, error) {
+func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) {
 	current := rtn
 
 	// Initial interval and subtree sizes
@@ -348,11 +415,11 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT
 // Subtree intervals are recursively allocated according to subtree sizes and
 // the allocation rule in splitWithExponentialBias. This method returns
 // a list of reachabilityTreeNodes modified by it.
-func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ([]*reachabilityTreeNode, error) {
-	// We set the interval to reset its remainingInterval, so we could reallocate it while reindexing.
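
// Editor's sketch, not part of the patch: why addChild must eventually
// reindex. splitInHalf hands half of the remaining space to each new child,
// so a chain of children exhausts an interval of size s after roughly
// log2(s) insertions. For s = 100 this returns 7, matching TestAddChild
// below (2^6 = 64 < 100 < 128 = 2^7). The helper name is hypothetical.
func exampleInsertionsUntilReindex(s uint64) int {
	insertions := 0
	for remaining := s; remaining > 0; remaining /= 2 {
		insertions++
	}
	return insertions
}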
- rtn.setInterval(rtn.interval) +func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ( + modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() queue := []*reachabilityTreeNode{rtn} - var modifiedNodes []*reachabilityTreeNode for len(queue) > 0 { var current *reachabilityTreeNode current, queue = queue[0], queue[1:] @@ -361,29 +428,245 @@ func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabil for i, child := range current.children { sizes[i] = subTreeSizeMap[child] } - intervals, err := current.remainingInterval.splitWithExponentialBias(sizes) + intervals, err := current.intervalRangeForChildAllocation().splitWithExponentialBias(sizes) if err != nil { return nil, err } for i, child := range current.children { childInterval := intervals[i] - child.setInterval(childInterval) + child.interval = childInterval queue = append(queue, child) } - - // Empty up remaining interval - current.remainingInterval.start = current.remainingInterval.end + 1 } - modifiedNodes = append(modifiedNodes, current) + allModifiedTreeNodes[current] = struct{}{} } - return modifiedNodes, nil + return allModifiedTreeNodes, nil +} + +func (rtn *reachabilityTreeNode) reindexIntervalsEarlierThanReindexRoot( + reindexRoot *reachabilityTreeNode) (modifiedTreeNodes, error) { + + // Find the common ancestor for both rtn and the reindex root + commonAncestor := rtn.findCommonAncestorWithReindexRoot(reindexRoot) + + // The chosen child is: + // a. A reachability tree child of `commonAncestor` + // b. A reachability tree ancestor of `reindexRoot` + commonAncestorChosenChild, err := commonAncestor.findAncestorAmongChildren(reindexRoot) + if err != nil { + return nil, err + } + + if rtn.interval.end < commonAncestorChosenChild.interval.start { + // rtn is in the subtree before the chosen child + return rtn.reclaimIntervalBeforeChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) + } + if commonAncestorChosenChild.interval.end < rtn.interval.start { + // rtn is in the subtree after the chosen child + return rtn.reclaimIntervalAfterChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) + } + return nil, errors.Errorf("rtn is in the chosen child's subtree") +} + +func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() + + current := commonAncestorChosenChild + if !commonAncestorChosenChild.hasSlackIntervalBefore() { + // The common ancestor ran out of slack before its chosen child. + // Climb up the reachability tree toward the reindex root until + // we find a node that has enough slack. + for !current.hasSlackIntervalBefore() && current != reindexRoot { + var err error + current, err = current.findAncestorAmongChildren(reindexRoot) + if err != nil { + return nil, err + } + } + + if current == reindexRoot { + // "Deallocate" an interval of slackReachabilityIntervalForReclaiming + // from this node. This is the interval that we'll use for the new + // node. 
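
// Editor's note, not part of the patch: every reindex variant in this file
// reduces to the same two-phase pass, sketched here with the patch's own
// helpers: count subtree sizes bottom-up, then reallocate intervals
// top-down in proportion to those sizes. countSubtreesAndPropagateInterval
// below packages exactly this pairing. The helper name is hypothetical.
func exampleTwoPhaseReindex(rtn *reachabilityTreeNode) (modifiedTreeNodes, error) {
	subTreeSizeMap := make(map[*reachabilityTreeNode]uint64)
	rtn.countSubtrees(subTreeSizeMap)            // phase 1: sizes
	return rtn.propagateInterval(subTreeSizeMap) // phase 2: intervals
}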
+ originalInterval := current.interval + current.interval = newReachabilityInterval( + current.interval.start+slackReachabilityIntervalForReclaiming, + current.interval.end, + ) + modifiedNodes, err := current.countSubtreesAndPropagateInterval() + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + current.interval = originalInterval + } + } + + // Go down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current node with an interval that is smaller by + // slackReachabilityIntervalForReclaiming. This is to make room + // for the new node. + for current != commonAncestor { + current.interval = newReachabilityInterval( + current.interval.start+slackReachabilityIntervalForReclaiming, + current.interval.end, + ) + modifiedNodes, err := current.parent.reindexIntervalsBeforeNode(current) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + current = current.parent + } + + return allModifiedTreeNodes, nil +} + +// reindexIntervalsBeforeNode applies a tight interval to the reachability +// subtree before `node`. Note that `node` itself is unaffected. +func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + childrenBeforeNode, _, err := rtn.splitChildrenAroundChild(node) + if err != nil { + return nil, err + } + + childrenBeforeNodeSizes, childrenBeforeNodeSubtreeSizeMaps, childrenBeforeNodeSizesSum := + calcReachabilityTreeNodeSizes(childrenBeforeNode) + + // Apply a tight interval + newIntervalEnd := node.interval.start - 1 + newInterval := newReachabilityInterval(newIntervalEnd-childrenBeforeNodeSizesSum+1, newIntervalEnd) + intervals, err := newInterval.splitExact(childrenBeforeNodeSizes) + if err != nil { + return nil, err + } + return orderedTreeNodeSet(childrenBeforeNode).propagateIntervals(intervals, childrenBeforeNodeSubtreeSizeMaps) +} + +func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() + + current := commonAncestorChosenChild + if !commonAncestorChosenChild.hasSlackIntervalAfter() { + // The common ancestor ran out of slack after its chosen child. + // Climb up the reachability tree toward the reindex root until + // we find a node that has enough slack. + for !current.hasSlackIntervalAfter() && current != reindexRoot { + var err error + current, err = current.findAncestorAmongChildren(reindexRoot) + if err != nil { + return nil, err + } + } + + if current == reindexRoot { + // "Deallocate" an interval of slackReachabilityIntervalForReclaiming + // from this node. This is the interval that we'll use for the new + // node. + originalInterval := current.interval + current.interval = newReachabilityInterval( + current.interval.start, + current.interval.end-slackReachabilityIntervalForReclaiming, + ) + modifiedNodes, err := current.countSubtreesAndPropagateInterval() + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + current.interval = originalInterval + } + } + + // Go down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree after the + // current node with an interval that is smaller by + // slackReachabilityIntervalForReclaiming. This is to make room + // for the new node. 
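
// Editor's worked example, not part of the patch, using the default
// slackReachabilityIntervalForReclaiming = 1: on each hop of the loop below,
// a node holding [100, 200] donates its rightmost index and keeps [100, 199],
// the mirror image of reclaimIntervalBeforeChosenChild above. Repeating this
// down to the common ancestor frees exactly one index in which the new node
// can be placed. The helper name is hypothetical.
func exampleShrinkFromRight() *reachabilityInterval {
	interval := newReachabilityInterval(100, 200)
	return newReachabilityInterval(
		interval.start, // 100
		interval.end-slackReachabilityIntervalForReclaiming, // 199
	)
}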
+ for current != commonAncestor { + current.interval = newReachabilityInterval( + current.interval.start, + current.interval.end-slackReachabilityIntervalForReclaiming, + ) + modifiedNodes, err := current.parent.reindexIntervalsAfterNode(current) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + current = current.parent + } + + return allModifiedTreeNodes, nil +} + +// reindexIntervalsAfterNode applies a tight interval to the reachability +// subtree after `node`. Note that `node` itself is unaffected. +func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + _, childrenAfterNode, err := rtn.splitChildrenAroundChild(node) + if err != nil { + return nil, err + } + + childrenAfterNodeSizes, childrenAfterNodeSubtreeSizeMaps, childrenAfterNodeSizesSum := + calcReachabilityTreeNodeSizes(childrenAfterNode) + + // Apply a tight interval + newIntervalStart := node.interval.end + 1 + newInterval := newReachabilityInterval(newIntervalStart, newIntervalStart+childrenAfterNodeSizesSum-1) + intervals, err := newInterval.splitExact(childrenAfterNodeSizes) + if err != nil { + return nil, err + } + return orderedTreeNodeSet(childrenAfterNode).propagateIntervals(intervals, childrenAfterNodeSubtreeSizeMaps) +} + +func (tns orderedTreeNodeSet) propagateIntervals(intervals []*reachabilityInterval, + subtreeSizeMaps []map[*reachabilityTreeNode]uint64) (modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() + for i, node := range tns { + node.interval = intervals[i] + subtreeSizeMap := subtreeSizeMaps[i] + modifiedNodes, err := node.propagateInterval(subtreeSizeMap) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + } + return allModifiedTreeNodes, nil } // isAncestorOf checks if this node is a reachability tree ancestor -// of the other node. +// of the other node. Note that we use the graph theory convention +// here which defines that rtn is also an ancestor of itself. func (rtn *reachabilityTreeNode) isAncestorOf(other *reachabilityTreeNode) bool { - return rtn.interval.isAncestorOf(other.interval) + return rtn.interval.contains(other.interval) +} + +// findCommonAncestorWithReindexRoot finds the most recent reachability +// tree ancestor common to both rtn and the given reindex root. Note +// that we assume that almost always the chain between the reindex root +// and the common ancestor is longer than the chain between rtn and the +// common ancestor. +func (rtn *reachabilityTreeNode) findCommonAncestorWithReindexRoot(reindexRoot *reachabilityTreeNode) *reachabilityTreeNode { + currentThis := rtn + for { + if currentThis.isAncestorOf(reindexRoot) { + return currentThis + } + currentThis = currentThis.parent + } } // String returns a string representation of a reachability tree node @@ -408,174 +691,527 @@ func (rtn *reachabilityTreeNode) String() string { return strings.Join(lines, "\n") } -// futureCoveringBlockSet represents a collection of blocks in the future of +// orderedTreeNodeSet is an ordered set of reachabilityTreeNodes +// Note that this type does not validate order validity. It's the +// responsibility of the caller to construct instances of this +// type properly. +type orderedTreeNodeSet []*reachabilityTreeNode + +// futureCoveringTreeNodeSet represents a collection of blocks in the future of // a certain block. 
Once a block B is added to the DAG, every block A_i in
-// B's selected parent anticone must register B in its futureCoveringBlockSet. This allows
-// to relatively quickly (O(log(|futureCoveringBlockSet|))) query whether B
+// B's selected parent anticone must register B in its futureCoveringTreeNodeSet. This allows
+// us to relatively quickly (O(log(|futureCoveringTreeNodeSet|))) query whether B
 // is a descendant (is in the "future") of any block that previously
 // registered it.
 //
-// Note that futureCoveringBlockSet is meant to be queried only if B is not
+// Note that futureCoveringTreeNodeSet is meant to be queried only if B is not
 // a reachability tree descendant of the block in question, as reachability
 // tree queries are always O(1).
 //
-// See insertBlock, isInFuture, and dag.isAncestorOf for further details.
-type futureCoveringBlockSet []*futureCoveringBlock
+// See insertNode, hasAncestorOf, and reachabilityTree.isInPast for further
+// details.
+type futureCoveringTreeNodeSet orderedTreeNodeSet
 
-// futureCoveringBlock represents a block in the future of some other block.
-type futureCoveringBlock struct {
-	blockNode *blockNode
-	treeNode  *reachabilityTreeNode
-}
-
-// insertBlock inserts the given block into this futureCoveringBlockSet
-// while keeping futureCoveringBlockSet ordered by interval.
-// If a block B ∈ futureCoveringBlockSet exists such that its interval
+// insertNode inserts the given node into this futureCoveringTreeNodeSet
+// while keeping futureCoveringTreeNodeSet ordered by interval.
+// If a block B ∈ futureCoveringTreeNodeSet exists such that its interval
 // contains block's interval, block need not be added. If block's
 // interval contains B's interval, it replaces it.
 //
 // Notes:
 // * Intervals never intersect unless one contains the other
 //   (this follows from the tree structure and the indexing rule).
-// * Since futureCoveringBlockSet is kept ordered, a binary search can be
+// * Since futureCoveringTreeNodeSet is kept ordered, a binary search can be
 //   used for insertion/queries.
 // * Although reindexing may change a block's interval, the
 //   is-superset relation will by definition
 //   always be preserved.
-func (fb *futureCoveringBlockSet) insertBlock(block *futureCoveringBlock) {
-	blockInterval := block.treeNode.interval
-	i := fb.findIndex(block)
-	if i > 0 {
-		candidate := (*fb)[i-1]
-		candidateInterval := candidate.treeNode.interval
-		if candidateInterval.isAncestorOf(blockInterval) {
-			// candidate is an ancestor of block, no need to insert
-			return
-		}
-		if blockInterval.isAncestorOf(candidateInterval) {
-			// block is an ancestor of candidate, and can thus replace it
-			(*fb)[i-1] = block
-			return
-		}
+func (fb *futureCoveringTreeNodeSet) insertNode(node *reachabilityTreeNode) {
+	ancestorIndex, ok := orderedTreeNodeSet(*fb).findAncestorIndexOfNode(node)
+	if !ok {
+		*fb = append([]*reachabilityTreeNode{node}, *fb...)
+		return
 	}
 
-	// Insert block in the correct index to maintain futureCoveringBlockSet as
+	candidate := (*fb)[ancestorIndex]
+	if candidate.isAncestorOf(node) {
+		// candidate is an ancestor of node, no need to insert
+		return
+	}
+
+	if node.isAncestorOf(candidate) {
+		// node is an ancestor of candidate, and can thus replace it
+		(*fb)[ancestorIndex] = node
+		return
+	}
+
+	// Insert node in the correct index to maintain futureCoveringTreeNodeSet as
 	// a sorted-by-interval list.
-	// Note that i might be equal to len(futureCoveringBlockSet)
-	left := (*fb)[:i]
-	right := append([]*futureCoveringBlock{block}, (*fb)[i:]...)
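
// Editor's sketch, not part of the patch, borrowing intervals from
// TestHasAncestorOf further below: because the set is kept sorted by
// interval, insertion and queries binary-search for the candidate with the
// greatest start not above the queried interval, then run a single
// containment check (see hasAncestorOf below). The helper name is
// hypothetical.
func exampleFutureCoveringQuery() (bool, bool) {
	set := futureCoveringTreeNodeSet{
		&reachabilityTreeNode{interval: newReachabilityInterval(2, 3)},
		&reachabilityTreeNode{interval: newReachabilityInterval(4, 67)},
		&reachabilityTreeNode{interval: newReachabilityInterval(67, 77)},
	}
	in := set.hasAncestorOf(&reachabilityTreeNode{interval: newReachabilityInterval(5, 7)})     // true: [4,67] contains [5,7]
	out := set.hasAncestorOf(&reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}) // false: no member contains it
	return in, out
}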
+	// Note that ancestorIndex+1 might be equal to len(futureCoveringTreeNodeSet)
+	left := (*fb)[:ancestorIndex+1]
+	right := append([]*reachabilityTreeNode{node}, (*fb)[ancestorIndex+1:]...)
 	*fb = append(left, right...)
 }
 
-// isInFuture resolves whether the given block is in the subtree of
-// any block in this futureCoveringBlockSet.
-// See insertBlock method for the complementary insertion behavior.
+// hasAncestorOf resolves whether the given node is in the subtree of
+// any node in this futureCoveringTreeNodeSet.
+// See insertNode method for the complementary insertion behavior.
 //
 // Like the insert method, this method also relies on the fact that
-// futureCoveringBlockSet is kept ordered by interval to efficiently perform a
-// binary search over futureCoveringBlockSet and answer the query in
-// O(log(|futureCoveringBlockSet|)).
-func (fb futureCoveringBlockSet) isInFuture(block *futureCoveringBlock) bool {
-	i := fb.findIndex(block)
-	if i == 0 {
-		// No candidate to contain block
+// futureCoveringTreeNodeSet is kept ordered by interval to efficiently perform a
+// binary search over futureCoveringTreeNodeSet and answer the query in
+// O(log(|futureCoveringTreeNodeSet|)).
+func (fb futureCoveringTreeNodeSet) hasAncestorOf(node *reachabilityTreeNode) bool {
+	ancestorIndex, ok := orderedTreeNodeSet(fb).findAncestorIndexOfNode(node)
+	if !ok {
+		// No candidate to contain node
 		return false
 	}
 
-	candidate := fb[i-1]
-	return candidate.treeNode.isAncestorOf(block.treeNode)
+	candidate := fb[ancestorIndex]
+	return candidate.isAncestorOf(node)
 }
 
-// findIndex finds the index of the block with the maximum start that is below
-// the given block.
-func (fb futureCoveringBlockSet) findIndex(block *futureCoveringBlock) int {
-	blockInterval := block.treeNode.interval
+// findAncestorOfNode finds the reachability tree ancestor of `node`
+// among the nodes in `tns`.
+func (tns orderedTreeNodeSet) findAncestorOfNode(node *reachabilityTreeNode) (*reachabilityTreeNode, bool) {
+	ancestorIndex, ok := tns.findAncestorIndexOfNode(node)
+	if !ok {
+		return nil, false
+	}
+
+	return tns[ancestorIndex], true
+}
+
+// findAncestorIndexOfNode finds the index of the reachability tree
+// ancestor of `node` among the nodes in `tns`. It does so by finding
+// the index of the block with the maximum start that is below the
+// given block.
+func (tns orderedTreeNodeSet) findAncestorIndexOfNode(node *reachabilityTreeNode) (int, bool) {
+	blockInterval := node.interval
 	end := blockInterval.end
 
 	low := 0
-	high := len(fb)
+	high := len(tns)
 	for low < high {
 		middle := (low + high) / 2
-		middleInterval := fb[middle].treeNode.interval
+		middleInterval := tns[middle].interval
 		if end < middleInterval.start {
 			high = middle
 		} else {
 			low = middle + 1
 		}
 	}
-	return low
+
+	if low == 0 {
+		return 0, false
+	}
+	return low - 1, true
 }
 
-// String returns a string representation of the intervals in this futureCoveringBlockSet.
+// String returns a string representation of the intervals in this futureCoveringTreeNodeSet.
+func (fb futureCoveringTreeNodeSet) String() string {
 	intervalsString := ""
-	for _, block := range fb {
-		intervalsString += block.treeNode.interval.String()
+	for _, node := range fb {
+		intervalsString += node.interval.String()
 	}
 	return intervalsString
 }
 
-func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone []*blockNode) error {
+func (rt *reachabilityTree) addBlock(node *blockNode, selectedParentAnticone []*blockNode) error {
 	// Allocate a new reachability tree node
 	newTreeNode := newReachabilityTreeNode(node)
 
 	// If this is the genesis node, simply initialize it and return
 	if node.isGenesis() {
-		dag.reachabilityStore.setTreeNode(newTreeNode)
+		rt.store.setTreeNode(newTreeNode)
+		rt.reindexRoot = newTreeNode
 		return nil
 	}
 
 	// Insert the node into the selected parent's reachability tree
-	selectedParentTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(node.selectedParent)
+	selectedParentTreeNode, err := rt.store.treeNodeByBlockNode(node.selectedParent)
 	if err != nil {
 		return err
 	}
-	modifiedTreeNodes, err := selectedParentTreeNode.addChild(newTreeNode)
+	modifiedNodes, err := selectedParentTreeNode.addChild(newTreeNode, rt.reindexRoot)
 	if err != nil {
 		return err
 	}
-	for _, modifiedTreeNode := range modifiedTreeNodes {
-		dag.reachabilityStore.setTreeNode(modifiedTreeNode)
+	for modifiedNode := range modifiedNodes {
+		rt.store.setTreeNode(modifiedNode)
 	}
 
 	// Add the block to the futureCoveringSets of all the blocks
 	// in the selected parent's anticone
 	for _, current := range selectedParentAnticone {
-		currentFutureCoveringSet, err := dag.reachabilityStore.futureCoveringSetByBlockNode(current)
+		currentFutureCoveringSet, err := rt.store.futureCoveringSetByBlockNode(current)
 		if err != nil {
 			return err
 		}
-		currentFutureCoveringSet.insertBlock(&futureCoveringBlock{blockNode: node, treeNode: newTreeNode})
-		err = dag.reachabilityStore.setFutureCoveringSet(current, currentFutureCoveringSet)
+		currentFutureCoveringSet.insertNode(newTreeNode)
+		err = rt.store.setFutureCoveringSet(current, currentFutureCoveringSet)
 		if err != nil {
 			return err
 		}
 	}
+
+	// Update the reindex root.
+	// Note that we check for blue score here in order to find out
+	// whether the new node is going to be the virtual's selected
+	// parent. We don't check node == virtual.selectedParent because
+	// at this stage the virtual has not yet been updated.
+	if node.blueScore > rt.dag.SelectedTipBlueScore() {
+		updateStartTime := time.Now()
+		modifiedNodes, err := rt.updateReindexRoot(newTreeNode)
+		if err != nil {
+			return err
+		}
+		if len(modifiedNodes) > 0 {
+			updateTimeElapsed := time.Since(updateStartTime)
+			log.Debugf("Reachability reindex root updated to %s. "+
+				"Modified %d tree nodes and took %dms.",
+				rt.reindexRoot.blockNode.hash,
+				len(modifiedNodes), updateTimeElapsed.Milliseconds())
+			for modifiedNode := range modifiedNodes {
+				rt.store.setTreeNode(modifiedNode)
+			}
+		}
+	}
+
 	return nil
 }
 
-// isAncestorOf returns true if this node is in the past of the other node
-// in the DAG.
The complexity of this method is O(log(|this.futureCoveringBlockSet|))
-func (dag *BlockDAG) isAncestorOf(this *blockNode, other *blockNode) (bool, error) {
-	// First, check if this node is a reachability tree ancestor of the
+type reachabilityTree struct {
+	dag         *BlockDAG
+	store       *reachabilityStore
+	reindexRoot *reachabilityTreeNode
+}
+
+func newReachabilityTree(dag *BlockDAG) *reachabilityTree {
+	store := newReachabilityStore(dag)
+	return &reachabilityTree{
+		dag:         dag,
+		store:       store,
+		reindexRoot: nil,
+	}
+}
+
+func (rt *reachabilityTree) init(dbContext dbaccess.Context) error {
+	// Init the store
+	err := rt.store.init(dbContext)
+	if err != nil {
+		return err
+	}
+
+	// Fetch the reindex root hash. If missing, use the genesis hash
+	reindexRootHash, err := dbaccess.FetchReachabilityReindexRoot(dbContext)
+	if err != nil {
+		if !dbaccess.IsNotFoundError(err) {
+			return err
+		}
+		reindexRootHash = rt.dag.dagParams.GenesisHash
+	}
+
+	// Init the reindex root
+	reachabilityReindexRootNode, ok := rt.dag.index.LookupNode(reindexRootHash)
+	if !ok {
+		return errors.Errorf("reachability reindex root block %s "+
+			"does not exist in the DAG", reindexRootHash)
+	}
+	rt.reindexRoot, err = rt.store.treeNodeByBlockNode(reachabilityReindexRootNode)
+	if err != nil {
+		return errors.Wrapf(err, "cannot set reachability reindex root")
+	}
+
+	return nil
+}
+
+func (rt *reachabilityTree) storeState(dbTx *dbaccess.TxContext) error {
+	// Flush the store
+	err := rt.store.flushToDB(dbTx)
+	if err != nil {
+		return err
+	}
+
+	// Store the reindex root
+	err = dbaccess.StoreReachabilityReindexRoot(dbTx, rt.reindexRoot.blockNode.hash)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (rt *reachabilityTree) updateReindexRoot(newTreeNode *reachabilityTreeNode) (modifiedTreeNodes, error) {
+	allModifiedTreeNodes := newModifiedTreeNodes()
+
+	nextReindexRoot := rt.reindexRoot
+	for {
+		candidateReindexRoot, modifiedNodes, found, err := rt.maybeMoveReindexRoot(nextReindexRoot, newTreeNode)
+		if err != nil {
+			return nil, err
+		}
+		if !found {
+			break
+		}
+		allModifiedTreeNodes.addAll(modifiedNodes)
+		nextReindexRoot = candidateReindexRoot
+	}
+
+	rt.reindexRoot = nextReindexRoot
+	return allModifiedTreeNodes, nil
+}
+
+func (rt *reachabilityTree) maybeMoveReindexRoot(
+	reindexRoot *reachabilityTreeNode, newTreeNode *reachabilityTreeNode) (
+	newReindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes, found bool, err error) {
+
+	if !reindexRoot.isAncestorOf(newTreeNode) {
+		commonAncestor := newTreeNode.findCommonAncestorWithReindexRoot(reindexRoot)
+		return commonAncestor, nil, true, nil
+	}
+
+	reindexRootChosenChild, err := reindexRoot.findAncestorAmongChildren(newTreeNode)
+	if err != nil {
+		return nil, nil, false, err
+	}
+	if newTreeNode.blockNode.blueScore-reindexRootChosenChild.blockNode.blueScore < reachabilityReindexWindow {
+		return nil, nil, false, nil
+	}
+	modifiedNodes, err = rt.concentrateIntervalAroundReindexRootChosenChild(reindexRoot, reindexRootChosenChild)
+	if err != nil {
+		return nil, nil, false, err
+	}
+
+	return reindexRootChosenChild, modifiedNodes, true, nil
+}
+
+// findAncestorAmongChildren finds the reachability tree child
+// of rtn that is the ancestor of node.
+func (rtn *reachabilityTreeNode) findAncestorAmongChildren(node *reachabilityTreeNode) (*reachabilityTreeNode, error) { + ancestor, ok := orderedTreeNodeSet(rtn.children).findAncestorOfNode(node) + if !ok { + return nil, errors.Errorf("rtn is not an ancestor of node") + } + + return ancestor, nil +} + +func (rt *reachabilityTree) concentrateIntervalAroundReindexRootChosenChild( + reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode) ( + modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() + + reindexRootChildNodesBeforeChosen, reindexRootChildNodesAfterChosen, err := + reindexRoot.splitChildrenAroundChild(reindexRootChosenChild) + if err != nil { + return nil, err + } + + reindexRootChildNodesBeforeChosenSizesSum, modifiedNodesBeforeChosen, err := + rt.tightenIntervalsBeforeReindexRootChosenChild(reindexRoot, reindexRootChildNodesBeforeChosen) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodesBeforeChosen) + + reindexRootChildNodesAfterChosenSizesSum, modifiedNodesAfterChosen, err := + rt.tightenIntervalsAfterReindexRootChosenChild(reindexRoot, reindexRootChildNodesAfterChosen) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodesAfterChosen) + + modifiedNodesForReindexRootExpansion, err := rt.expandIntervalInReindexRootChosenChild( + reindexRoot, reindexRootChosenChild, reindexRootChildNodesBeforeChosenSizesSum, reindexRootChildNodesAfterChosenSizesSum) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodesForReindexRootExpansion) + + return allModifiedTreeNodes, nil +} + +// splitChildrenAroundChild splits `rtn` into two slices: the nodes that are before +// `child` and the nodes that are after. +func (rtn *reachabilityTreeNode) splitChildrenAroundChild(child *reachabilityTreeNode) ( + nodesBeforeChild []*reachabilityTreeNode, nodesAfterChild []*reachabilityTreeNode, err error) { + + for i, candidateChild := range rtn.children { + if candidateChild == child { + return rtn.children[:i], rtn.children[i+1:], nil + } + } + return nil, nil, errors.Errorf("child not a child of rtn") +} + +func (rt *reachabilityTree) tightenIntervalsBeforeReindexRootChosenChild( + reindexRoot *reachabilityTreeNode, reindexRootChildNodesBeforeChosen []*reachabilityTreeNode) ( + reindexRootChildNodesBeforeChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + + reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps, reindexRootChildNodesBeforeChosenSizesSum := + calcReachabilityTreeNodeSizes(reindexRootChildNodesBeforeChosen) + + intervalBeforeReindexRootStart := newReachabilityInterval( + reindexRoot.interval.start+reachabilityReindexSlack, + reindexRoot.interval.start+reachabilityReindexSlack+reindexRootChildNodesBeforeChosenSizesSum-1, + ) + + modifiedNodes, err = rt.propagateChildIntervals(intervalBeforeReindexRootStart, reindexRootChildNodesBeforeChosen, + reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps) + if err != nil { + return 0, nil, err + } + return reindexRootChildNodesBeforeChosenSizesSum, modifiedNodes, nil +} + +func (rt *reachabilityTree) tightenIntervalsAfterReindexRootChosenChild( + reindexRoot *reachabilityTreeNode, reindexRootChildNodesAfterChosen []*reachabilityTreeNode) ( + reindexRootChildNodesAfterChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + + reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps, 
reindexRootChildNodesAfterChosenSizesSum :=
+		calcReachabilityTreeNodeSizes(reindexRootChildNodesAfterChosen)
+
+	intervalAfterReindexRootEnd := newReachabilityInterval(
+		reindexRoot.interval.end-reachabilityReindexSlack-reindexRootChildNodesAfterChosenSizesSum,
+		reindexRoot.interval.end-reachabilityReindexSlack-1,
+	)
+
+	modifiedNodes, err = rt.propagateChildIntervals(intervalAfterReindexRootEnd, reindexRootChildNodesAfterChosen,
+		reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps)
+	if err != nil {
+		return 0, nil, err
+	}
+	return reindexRootChildNodesAfterChosenSizesSum, modifiedNodes, nil
+}
+
+func (rt *reachabilityTree) expandIntervalInReindexRootChosenChild(reindexRoot *reachabilityTreeNode,
+	reindexRootChosenChild *reachabilityTreeNode, reindexRootChildNodesBeforeChosenSizesSum uint64,
+	reindexRootChildNodesAfterChosenSizesSum uint64) (modifiedTreeNodes, error) {
+
+	allModifiedTreeNodes := newModifiedTreeNodes()
+
+	newReindexRootChildInterval := newReachabilityInterval(
+		reindexRoot.interval.start+reindexRootChildNodesBeforeChosenSizesSum+reachabilityReindexSlack,
+		reindexRoot.interval.end-reindexRootChildNodesAfterChosenSizesSum-reachabilityReindexSlack-1,
+	)
+
+	if !newReindexRootChildInterval.contains(reindexRootChosenChild.interval) {
+		// New interval doesn't contain the previous one, propagation is required
+
+		// We assign slack on both sides as an optimization. Were we to
+		// assign a tight interval, the next time the reindex root moves we
+		// would need to propagate intervals again. That is to say, when we
+		// DO allocate slack, the next time
+		// expandIntervalInReindexRootChosenChild is called (i.e. the next
+		// time the reindex root moves), newReindexRootChildInterval is
+		// likely to contain reindexRootChosenChild.interval.
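
// Editor's sketch, not part of the patch: the interval computed above in one
// place. Everything in the reindex root's interval that is not reserved as
// slack or tightened around the sibling subtrees is handed to the chosen
// child, where virtually all new blocks are expected to land. The helper
// name is hypothetical.
func exampleChosenChildInterval(root *reachabilityInterval, sizeBefore, sizeAfter uint64) *reachabilityInterval {
	return newReachabilityInterval(
		root.start+sizeBefore+reachabilityReindexSlack,
		root.end-sizeAfter-reachabilityReindexSlack-1,
	)
}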
+ reindexRootChosenChild.interval = newReachabilityInterval( + newReindexRootChildInterval.start+reachabilityReindexSlack, + newReindexRootChildInterval.end-reachabilityReindexSlack, + ) + modifiedNodes, err := reindexRootChosenChild.countSubtreesAndPropagateInterval() + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + } + + reindexRootChosenChild.interval = newReindexRootChildInterval + allModifiedTreeNodes[reindexRootChosenChild] = struct{}{} + return allModifiedTreeNodes, nil +} + +func (rtn *reachabilityTreeNode) countSubtreesAndPropagateInterval() (modifiedTreeNodes, error) { + subtreeSizeMap := make(map[*reachabilityTreeNode]uint64) + rtn.countSubtrees(subtreeSizeMap) + return rtn.propagateInterval(subtreeSizeMap) +} + +func calcReachabilityTreeNodeSizes(treeNodes []*reachabilityTreeNode) ( + sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64, sum uint64) { + + sizes = make([]uint64, len(treeNodes)) + subtreeSizeMaps = make([]map[*reachabilityTreeNode]uint64, len(treeNodes)) + sum = 0 + for i, node := range treeNodes { + subtreeSizeMap := make(map[*reachabilityTreeNode]uint64) + node.countSubtrees(subtreeSizeMap) + subtreeSize := subtreeSizeMap[node] + sizes[i] = subtreeSize + subtreeSizeMaps[i] = subtreeSizeMap + sum += subtreeSize + } + return sizes, subtreeSizeMaps, sum +} + +func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterval, + childNodes []*reachabilityTreeNode, sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64) ( + modifiedTreeNodes, error) { + + allModifiedTreeNodes := newModifiedTreeNodes() + + childIntervalSizes, err := interval.splitExact(sizes) + if err != nil { + return nil, err + } + + for i, child := range childNodes { + childInterval := childIntervalSizes[i] + child.interval = childInterval + + childSubtreeSizeMap := subtreeSizeMaps[i] + modifiedNodes, err := child.propagateInterval(childSubtreeSizeMap) + if err != nil { + return nil, err + } + allModifiedTreeNodes.addAll(modifiedNodes) + } + + return allModifiedTreeNodes, nil +} + +// isInPast returns true if `this` is in the past (exclusive) of `other` +// in the DAG. +// The complexity of this method is O(log(|this.futureCoveringTreeNodeSet|)) +func (rt *reachabilityTree) isInPast(this *blockNode, other *blockNode) (bool, error) { + // By definition, a node is not in the past of itself. 
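	// Editor's note, not part of the patch: the query below resolves in two
	// steps. Step 1 is O(1): if `this` is a reachability tree
	// (selected-parent-chain) ancestor of `other`, then `this` is certainly
	// in `other`'s past. Step 2 is O(log n): otherwise, `this` is in
	// `other`'s past exactly when some node previously registered in
	// `this`'s futureCoveringTreeNodeSet is a tree ancestor of `other`.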
+ if this == other { + return false, nil + } + + // Check if this node is a reachability tree ancestor of the // other node - thisTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(this) + isReachabilityTreeAncestor, err := rt.isReachabilityTreeAncestorOf(this, other) if err != nil { return false, err } - otherTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(other) - if err != nil { - return false, err - } - if thisTreeNode.isAncestorOf(otherTreeNode) { + if isReachabilityTreeAncestor { return true, nil } // Otherwise, use previously registered future blocks to complete the // reachability test - thisFutureCoveringSet, err := dag.reachabilityStore.futureCoveringSetByBlockNode(this) + thisFutureCoveringSet, err := rt.store.futureCoveringSetByBlockNode(this) if err != nil { return false, err } - return thisFutureCoveringSet.isInFuture(&futureCoveringBlock{blockNode: other, treeNode: otherTreeNode}), nil + otherTreeNode, err := rt.store.treeNodeByBlockNode(other) + if err != nil { + return false, err + } + return thisFutureCoveringSet.hasAncestorOf(otherTreeNode), nil +} + +// isReachabilityTreeAncestorOf returns whether `this` is in the selected parent chain of `other`. +func (rt *reachabilityTree) isReachabilityTreeAncestorOf(this *blockNode, other *blockNode) (bool, error) { + thisTreeNode, err := rt.store.treeNodeByBlockNode(this) + if err != nil { + return false, err + } + otherTreeNode, err := rt.store.treeNodeByBlockNode(other) + if err != nil { + return false, err + } + return thisTreeNode.isAncestorOf(otherTreeNode), nil } diff --git a/blockdag/reachability_test.go b/blockdag/reachability_test.go index e7ab60219..47ad98bcc 100644 --- a/blockdag/reachability_test.go +++ b/blockdag/reachability_test.go @@ -1,6 +1,8 @@ package blockdag import ( + "github.com/kaspanet/kaspad/dagconfig" + "github.com/kaspanet/kaspad/util/daghash" "reflect" "strings" "testing" @@ -11,19 +13,19 @@ func TestAddChild(t *testing.T) { // root -> a -> b -> c... // Create the root node of a new reachability tree root := newReachabilityTreeNode(&blockNode{}) - root.setInterval(newReachabilityInterval(1, 100)) + root.interval = newReachabilityInterval(1, 100) // Add a chain of child nodes just before a reindex occurs (2^6=64 < 100) currentTip := root for i := 0; i < 6; i++ { node := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(node) + modifiedNodes, err := currentTip.addChild(node, root) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } // Expect only the node and its parent to be affected - expectedModifiedNodes := []*reachabilityTreeNode{currentTip, node} + expectedModifiedNodes := newModifiedTreeNodes(currentTip, node) if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) { t.Fatalf("TestAddChild: unexpected modifiedNodes. 
"+ "want: %s, got: %s", expectedModifiedNodes, modifiedNodes) @@ -34,7 +36,7 @@ func TestAddChild(t *testing.T) { // Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128) lastChild := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(lastChild) + modifiedNodes, err := currentTip.addChild(lastChild, root) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -45,14 +47,18 @@ func TestAddChild(t *testing.T) { t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.") } - // Expect the tip to have an interval of 1 and remaining interval of 0 + // Expect the tip to have an interval of 1 and remaining interval of 0 both before and after tipInterval := lastChild.interval.size() if tipInterval != 1 { t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipInterval) } - tipRemainingInterval := lastChild.remainingInterval.size() - if tipRemainingInterval != 0 { - t.Fatalf("TestAddChild: unexpected tip interval size: want: 0, got: %d", tipRemainingInterval) + tipRemainingIntervalBefore := lastChild.remainingIntervalBefore().size() + if tipRemainingIntervalBefore != 0 { + t.Fatalf("TestAddChild: unexpected tip interval before size: want: 0, got: %d", tipRemainingIntervalBefore) + } + tipRemainingIntervalAfter := lastChild.remainingIntervalAfter().size() + if tipRemainingIntervalAfter != 0 { + t.Fatalf("TestAddChild: unexpected tip interval after size: want: 0, got: %d", tipRemainingIntervalAfter) } // Expect all nodes to be descendant nodes of root @@ -68,19 +74,19 @@ func TestAddChild(t *testing.T) { // root -> a, b, c... // Create the root node of a new reachability tree root = newReachabilityTreeNode(&blockNode{}) - root.setInterval(newReachabilityInterval(1, 100)) + root.interval = newReachabilityInterval(1, 100) // Add child nodes to root just before a reindex occurs (2^6=64 < 100) childNodes := make([]*reachabilityTreeNode, 6) for i := 0; i < len(childNodes); i++ { childNodes[i] = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := root.addChild(childNodes[i]) + modifiedNodes, err := root.addChild(childNodes[i], root) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } // Expect only the node and the root to be affected - expectedModifiedNodes := []*reachabilityTreeNode{root, childNodes[i]} + expectedModifiedNodes := newModifiedTreeNodes(root, childNodes[i]) if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) { t.Fatalf("TestAddChild: unexpected modifiedNodes. 
"+ "want: %s, got: %s", expectedModifiedNodes, modifiedNodes) @@ -89,7 +95,7 @@ func TestAddChild(t *testing.T) { // Add another node to the root to trigger a reindex (100 < 2^7=128) lastChild = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err = root.addChild(lastChild) + modifiedNodes, err = root.addChild(lastChild, root) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -100,14 +106,18 @@ func TestAddChild(t *testing.T) { t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.") } - // Expect the last-added child to have an interval of 1 and remaining interval of 0 + // Expect the last-added child to have an interval of 1 and remaining interval of 0 both before and after lastChildInterval := lastChild.interval.size() if lastChildInterval != 1 { t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", lastChildInterval) } - lastChildRemainingInterval := lastChild.remainingInterval.size() - if lastChildRemainingInterval != 0 { - t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 0, got: %d", lastChildRemainingInterval) + lastChildRemainingIntervalBefore := lastChild.remainingIntervalBefore().size() + if lastChildRemainingIntervalBefore != 0 { + t.Fatalf("TestAddChild: unexpected lastChild interval before size: want: 0, got: %d", lastChildRemainingIntervalBefore) + } + lastChildRemainingIntervalAfter := lastChild.remainingIntervalAfter().size() + if lastChildRemainingIntervalAfter != 0 { + t.Fatalf("TestAddChild: unexpected lastChild interval after size: want: 0, got: %d", lastChildRemainingIntervalAfter) } // Expect all nodes to be descendant nodes of root @@ -125,7 +135,7 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { descendants := make([]*reachabilityTreeNode, numberOfDescendants) for i := 0; i < numberOfDescendants; i++ { node := newReachabilityTreeNode(&blockNode{}) - _, err := currentTip.addChild(node) + _, err := currentTip.addChild(node, root) if err != nil { t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err) } @@ -140,65 +150,65 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { } } - if root.isAncestorOf(root) { - t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is not expected to be a descendant of root") + if !root.isAncestorOf(root) { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root") } } -func TestIntervalIsAncestorOf(t *testing.T) { +func TestIntervalContains(t *testing.T) { tests := []struct { - name string - this, other *reachabilityInterval - isThisAncestorOfOther bool + name string + this, other *reachabilityInterval + thisContainsOther bool }{ { - name: "this == other", - this: newReachabilityInterval(10, 100), - other: newReachabilityInterval(10, 100), - isThisAncestorOfOther: false, + name: "this == other", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 100), + thisContainsOther: true, }, { - name: "this.start == other.start && this.end < other.end", - this: newReachabilityInterval(10, 90), - other: newReachabilityInterval(10, 100), - isThisAncestorOfOther: false, + name: "this.start == other.start && this.end < other.end", + this: newReachabilityInterval(10, 90), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, }, { - name: "this.start == other.start && this.end > other.end", - this: newReachabilityInterval(10, 100), - other: newReachabilityInterval(10, 90), - isThisAncestorOfOther: true, + name: "this.start == other.start && this.end > other.end", 
+ this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 90), + thisContainsOther: true, }, { - name: "this.start > other.start && this.end == other.end", - this: newReachabilityInterval(20, 100), - other: newReachabilityInterval(10, 100), - isThisAncestorOfOther: false, + name: "this.start > other.start && this.end == other.end", + this: newReachabilityInterval(20, 100), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, }, { - name: "this.start < other.start && this.end == other.end", - this: newReachabilityInterval(10, 100), - other: newReachabilityInterval(20, 100), - isThisAncestorOfOther: true, + name: "this.start < other.start && this.end == other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 100), + thisContainsOther: true, }, { - name: "this.start > other.start && this.end < other.end", - this: newReachabilityInterval(20, 90), - other: newReachabilityInterval(10, 100), - isThisAncestorOfOther: false, + name: "this.start > other.start && this.end < other.end", + this: newReachabilityInterval(20, 90), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, }, { - name: "this.start < other.start && this.end > other.end", - this: newReachabilityInterval(10, 100), - other: newReachabilityInterval(20, 90), - isThisAncestorOfOther: true, + name: "this.start < other.start && this.end > other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 90), + thisContainsOther: true, }, } for _, test := range tests { - if isAncestorOf := test.this.isAncestorOf(test.other); isAncestorOf != test.isThisAncestorOfOther { - t.Errorf("test.this.isAncestorOf(test.other) is expected to be %t but got %t", - test.isThisAncestorOfOther, isAncestorOf) + if thisContainsOther := test.this.contains(test.other); thisContainsOther != test.thisContainsOther { + t.Errorf("test.this.contains(test.other) is expected to be %t but got %t", + test.thisContainsOther, thisContainsOther) } } } @@ -431,140 +441,140 @@ func TestSplitWithExponentialBias(t *testing.T) { } } -func TestIsInFuture(t *testing.T) { - blocks := futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, +func TestHasAncestorOf(t *testing.T) { + treeNodes := futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, } tests := []struct { - block *futureCoveringBlock + treeNode *reachabilityTreeNode expectedResult bool }{ { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}, expectedResult: false, }, { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: 
newReachabilityInterval(5, 7)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}, expectedResult: true, }, { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}, expectedResult: true, }, { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}, expectedResult: false, }, { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}, expectedResult: false, }, { - block: &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}}, + treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}, expectedResult: true, }, } for i, test := range tests { - result := blocks.isInFuture(test.block) + result := treeNodes.hasAncestorOf(test.treeNode) if result != test.expectedResult { - t.Errorf("TestIsInFuture: unexpected result in test #%d. Want: %t, got: %t", + t.Errorf("TestHasAncestorOf: unexpected result in test #%d. Want: %t, got: %t", i, test.expectedResult, result) } } } -func TestInsertBlock(t *testing.T) { - blocks := futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, +func TestInsertNode(t *testing.T) { + treeNodes := futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, } tests := []struct { - toInsert []*futureCoveringBlock - expectedResult futureCoveringBlockSet + toInsert []*reachabilityTreeNode + expectedResult futureCoveringTreeNodeSet }{ { - toInsert: []*futureCoveringBlock{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}}, + toInsert: []*reachabilityTreeNode{ + {interval: newReachabilityInterval(5, 7)}, }, - expectedResult: futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, + expectedResult: futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: 
newReachabilityInterval(67, 77)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, }, }, { - toInsert: []*futureCoveringBlock{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}}, + toInsert: []*reachabilityTreeNode{ + {interval: newReachabilityInterval(65, 78)}, }, - expectedResult: futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, + expectedResult: futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, }, }, { - toInsert: []*futureCoveringBlock{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}}, + toInsert: []*reachabilityTreeNode{ + {interval: newReachabilityInterval(88, 97)}, }, - expectedResult: futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, + expectedResult: futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}, + &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, }, }, { - toInsert: []*futureCoveringBlock{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}}, + toInsert: []*reachabilityTreeNode{ + {interval: newReachabilityInterval(88, 97)}, + {interval: newReachabilityInterval(3000, 3010)}, }, - expectedResult: futureCoveringBlockSet{ - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}}, - {treeNode: &reachabilityTreeNode{interval: 
newReachabilityInterval(657, 789)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}}, - {treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}}, + expectedResult: futureCoveringTreeNodeSet{ + &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}, + &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}, + &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}, + &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}, + &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}, + &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}, + &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}, }, }, } for i, test := range tests { - // Create a clone of blocks so that we have a clean start for every test - blocksClone := make(futureCoveringBlockSet, len(blocks)) - for i, block := range blocks { - blocksClone[i] = block + // Create a clone of treeNodes so that we have a clean start for every test + treeNodesClone := make(futureCoveringTreeNodeSet, len(treeNodes)) + for i, treeNode := range treeNodes { + treeNodesClone[i] = treeNode } - for _, block := range test.toInsert { - blocksClone.insertBlock(block) + for _, treeNode := range test.toInsert { + treeNodesClone.insertNode(treeNode) } - if !reflect.DeepEqual(blocksClone, test.expectedResult) { - t.Errorf("TestInsertBlock: unexpected result in test #%d. Want: %s, got: %s", - i, test.expectedResult, blocksClone) + if !reflect.DeepEqual(treeNodesClone, test.expectedResult) { + t.Errorf("TestInsertNode: unexpected result in test #%d. Want: %s, got: %s", + i, test.expectedResult, treeNodesClone) } } } @@ -665,14 +675,14 @@ func TestSplitWithExponentialBiasErrors(t *testing.T) { func TestReindexIntervalErrors(t *testing.T) { // Create a treeNode and give it size = 100 treeNode := newReachabilityTreeNode(&blockNode{}) - treeNode.setInterval(newReachabilityInterval(0, 99)) + treeNode.interval = newReachabilityInterval(0, 99) // Add a chain of 100 child treeNodes to treeNode var err error currentTreeNode := treeNode for i := 0; i < 100; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err = currentTreeNode.addChild(childTreeNode) + _, err = currentTreeNode.addChild(childTreeNode, treeNode) if err != nil { break } @@ -704,12 +714,12 @@ func BenchmarkReindexInterval(b *testing.B) { // its first child gets half of the interval, so a reindex // from the root should happen after adding subTreeSize // nodes. - root.setInterval(newReachabilityInterval(0, subTreeSize*2)) + root.interval = newReachabilityInterval(0, subTreeSize*2) currentTreeNode := root for i := 0; i < subTreeSize; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err := currentTreeNode.addChild(childTreeNode) + _, err := currentTreeNode.addChild(childTreeNode, root) if err != nil { b.Fatalf("addChild: %s", err) } @@ -717,50 +727,47 @@ func BenchmarkReindexInterval(b *testing.B) { currentTreeNode = childTreeNode } - remainingIntervalBefore := *root.remainingInterval + originalRemainingInterval := *root.remainingIntervalAfter() // After we added subTreeSize nodes, adding the next // node should lead to a reindex from root. 
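The ordered-set behavior these tests pin down is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea: a slice of intervals kept sorted, with a binary-search containment query and a covering insert that matches what the tests above assert (an already-covered interval is a no-op, an interval that covers an existing member replaces it, anything else is spliced in order). The names interval, set, ancestorIndex and insert are illustrative only, not the kaspad types; the real logic lives in futureCoveringTreeNodeSet and orderedTreeNodeSet in blockdag/reachability.go.

package main

import (
	"fmt"
	"sort"
)

// interval mirrors the [start, end] intervals used in the tests above.
type interval struct{ start, end uint64 }

// set is an ordered list of intervals, standing in for
// futureCoveringTreeNodeSet.
type set []interval

// ancestorIndex returns the index of the only member that can contain
// iv: the rightmost member whose start is <= iv.end.
func (s set) ancestorIndex(iv interval) (int, bool) {
	i := sort.Search(len(s), func(j int) bool { return s[j].start > iv.end })
	if i == 0 {
		return 0, false
	}
	return i - 1, true
}

// hasAncestorOf reports whether some member of the set contains iv.
func (s set) hasAncestorOf(iv interval) bool {
	i, ok := s.ancestorIndex(iv)
	return ok && s[i].start <= iv.start && iv.end <= s[i].end
}

// insert keeps the set ordered and minimal.
func (s *set) insert(iv interval) {
	i, ok := s.ancestorIndex(iv)
	if !ok {
		*s = append(set{iv}, *s...)
		return
	}
	candidate := (*s)[i]
	if candidate.start <= iv.start && iv.end <= candidate.end {
		return // candidate already covers iv: nothing to do
	}
	if iv.start <= candidate.start && candidate.end <= iv.end {
		(*s)[i] = iv // iv covers the candidate: replace it
		return
	}
	// Splice iv in right after the candidate to keep the set ordered.
	*s = append(*s, interval{})
	copy((*s)[i+2:], (*s)[i+1:])
	(*s)[i+1] = iv
}

func main() {
	s := set{{1, 3}, {4, 67}, {67, 77}, {657, 789}}
	fmt.Println(s.hasAncestorOf(interval{67, 76}))  // true: covered by [67,77]
	fmt.Println(s.hasAncestorOf(interval{78, 100})) // false
	s.insert(interval{5, 7})   // no-op: already covered by [4,67]
	s.insert(interval{65, 78}) // replaces [67,77], which it covers
	s.insert(interval{88, 97}) // spliced in order
	fmt.Println(s) // [{1 3} {4 67} {65 78} {88 97} {657 789}]
}

Keeping the set sorted is what lets the query answer in O(log n) instead of scanning every covered block.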
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{}) b.StartTimer() - _, err := currentTreeNode.addChild(fullReindexTriggeringNode) + _, err := currentTreeNode.addChild(fullReindexTriggeringNode, root) b.StopTimer() if err != nil { b.Fatalf("addChild: %s", err) } - if *root.remainingInterval == remainingIntervalBefore { + if *root.remainingIntervalAfter() == originalRemainingInterval { b.Fatal("Expected a reindex from root, but it didn't happen") } } } -func TestFutureCoveringBlockSetString(t *testing.T) { +func TestFutureCoveringTreeNodeSetString(t *testing.T) { treeNodeA := newReachabilityTreeNode(&blockNode{}) - treeNodeA.setInterval(newReachabilityInterval(123, 456)) + treeNodeA.interval = newReachabilityInterval(123, 456) treeNodeB := newReachabilityTreeNode(&blockNode{}) - treeNodeB.setInterval(newReachabilityInterval(457, 789)) - futureCoveringSet := futureCoveringBlockSet{ - &futureCoveringBlock{treeNode: treeNodeA}, - &futureCoveringBlock{treeNode: treeNodeB}, - } + treeNodeB.interval = newReachabilityInterval(457, 789) + futureCoveringSet := futureCoveringTreeNodeSet{treeNodeA, treeNodeB} str := futureCoveringSet.String() expectedStr := "[123,456][457,789]" if str != expectedStr { - t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+ + t.Fatalf("TestFutureCoveringTreeNodeSetString: unexpected "+ "string. Want: %s, got: %s", expectedStr, str) } } func TestReachabilityTreeNodeString(t *testing.T) { treeNodeA := newReachabilityTreeNode(&blockNode{}) - treeNodeA.setInterval(newReachabilityInterval(100, 199)) + treeNodeA.interval = newReachabilityInterval(100, 199) treeNodeB1 := newReachabilityTreeNode(&blockNode{}) - treeNodeB1.setInterval(newReachabilityInterval(100, 150)) + treeNodeB1.interval = newReachabilityInterval(100, 150) treeNodeB2 := newReachabilityTreeNode(&blockNode{}) - treeNodeB2.setInterval(newReachabilityInterval(150, 199)) + treeNodeB2.interval = newReachabilityInterval(150, 199) treeNodeC := newReachabilityTreeNode(&blockNode{}) - treeNodeC.setInterval(newReachabilityInterval(100, 149)) + treeNodeC.interval = newReachabilityInterval(100, 149) treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2} treeNodeB2.children = []*reachabilityTreeNode{treeNodeC} @@ -771,3 +778,237 @@ func TestReachabilityTreeNodeString(t *testing.T) { "string. Want: %s, got: %s", expectedStr, str) } } + +func TestIsInPast(t *testing.T) { + // Create a new database and DAG instance to run tests against. + dag, teardownFunc, err := DAGSetup("TestIsInPast", true, Config{ + DAGParams: &dagconfig.SimnetParams, + }) + if err != nil { + t.Fatalf("TestIsInPast: Failed to setup DAG instance: %v", err) + } + defer teardownFunc() + + // Add a chain of two blocks above the genesis. This will be the + // selected parent chain. 
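The check this test performs ultimately reduces to interval containment: in the interval scheme, tree ancestry is plain containment, so ancestry queries are O(1). A tiny sketch, reusing the [100,199]/[150,199] shape from the String test above; interval and isAncestorOf are illustrative names, not the real reachabilityTreeNode methods:

package main

import "fmt"

type interval struct{ start, end uint64 }

// Containment is the whole trick: an ancestor's interval encloses
// every descendant's interval.
func isAncestorOf(a, b interval) bool {
	return a.start <= b.start && b.end <= a.end
}

func main() {
	parent := interval{100, 199} // like treeNodeA above
	child := interval{150, 199}  // like treeNodeB2 above
	fmt.Println(isAncestorOf(parent, child)) // true
	fmt.Println(isAncestorOf(child, parent)) // false
}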
+	blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+	blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
+
+	// Add another block above the genesis
+	blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+	nodeC, ok := dag.index.LookupNode(blockC.BlockHash())
+	if !ok {
+		t.Fatalf("TestIsInPast: block C is not in the block index")
+	}
+
+	// Add a block whose parents are the two tips
+	blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, nil)
+	nodeD, ok := dag.index.LookupNode(blockD.BlockHash())
+	if !ok {
+		t.Fatalf("TestIsInPast: block D is not in the block index")
+	}
+
+	// Make sure that node C is in the past of node D
+	isInPast, err := dag.reachabilityTree.isInPast(nodeC, nodeD)
+	if err != nil {
+		t.Fatalf("TestIsInPast: isInPast unexpectedly failed: %s", err)
+	}
+	if !isInPast {
+		t.Fatalf("TestIsInPast: node C is unexpectedly not in the past of node D")
+	}
+}
+
+func TestUpdateReindexRoot(t *testing.T) {
+	// Create a new database and DAG instance to run tests against.
+	dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{
+		DAGParams: &dagconfig.SimnetParams,
+	})
+	if err != nil {
+		t.Fatalf("Failed to setup DAG instance: %v", err)
+	}
+	defer teardownFunc()
+
+	// Set the reindex window to a low number to make this test run fast
+	originalReachabilityReindexWindow := reachabilityReindexWindow
+	reachabilityReindexWindow = 10
+	defer func() {
+		reachabilityReindexWindow = originalReachabilityReindexWindow
+	}()
+
+	// Add two blocks on top of the genesis block
+	chain1RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+	chain2RootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+
+	// Add chain of reachabilityReindexWindow - 1 blocks above chain1RootBlock and
+	// chain2RootBlock, respectively. This should not move the reindex root
+	chain1RootBlockTipHash := chain1RootBlock.BlockHash()
+	chain2RootBlockTipHash := chain2RootBlock.BlockHash()
+	genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
+		chain1Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
+		chain1RootBlockTipHash = chain1Block.BlockHash()
+
+		chain2Block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain2RootBlockTipHash}, nil)
+		chain2RootBlockTipHash = chain2Block.BlockHash()
+
+		if dag.reachabilityTree.reindexRoot != genesisTreeNode {
+			t.Fatalf("reindex root unexpectedly moved")
+		}
+	}
+
+	// Add another block over chain1. This will move the reindex root to chain1RootBlock
+	PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chain1RootBlockTipHash}, nil)
+
+	// Make sure that chain1RootBlock is now the reindex root
+	chain1RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain1RootBlock.BlockHash())
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	if dag.reachabilityTree.reindexRoot != chain1RootTreeNode {
+		t.Fatalf("chain1RootBlock is not the reindex root after reindex")
+	}
+
+	// Make sure that tight intervals have been applied to chain2. Since
+	// we added reachabilityReindexWindow-1 blocks to chain2, the size
+	// of the interval at its root should be equal to reachabilityReindexWindow
+	chain2RootTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(chain2RootBlock.BlockHash())
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	if chain2RootTreeNode.interval.size() != reachabilityReindexWindow {
+		t.Fatalf("got unexpected chain2RootNode interval. Want: %d, got: %d",
+			reachabilityReindexWindow, chain2RootTreeNode.interval.size())
+	}
+
+	// Make sure that the rest of the interval has been allocated to
+	// chain1RootNode, minus slack from both sides
+	expectedChain1RootIntervalSize := genesisTreeNode.interval.size() - 1 -
+		chain2RootTreeNode.interval.size() - 2*reachabilityReindexSlack
+	if chain1RootTreeNode.interval.size() != expectedChain1RootIntervalSize {
+		t.Fatalf("got unexpected chain1RootNode interval. Want: %d, got: %d",
+			expectedChain1RootIntervalSize, chain1RootTreeNode.interval.size())
+	}
+}
+
+func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) {
+	// Create a new database and DAG instance to run tests against.
+	dag, teardownFunc, err := DAGSetup("TestReindexIntervalsEarlierThanReindexRoot", true, Config{
+		DAGParams: &dagconfig.SimnetParams,
+	})
+	if err != nil {
+		t.Fatalf("Failed to setup DAG instance: %v", err)
+	}
+	defer teardownFunc()
+
+	// Set the reindex window and slack to low numbers to make this test
+	// run fast
+	originalReachabilityReindexWindow := reachabilityReindexWindow
+	originalReachabilityReindexSlack := reachabilityReindexSlack
+	reachabilityReindexWindow = 10
+	reachabilityReindexSlack = 5
+	defer func() {
+		reachabilityReindexWindow = originalReachabilityReindexWindow
+		reachabilityReindexSlack = originalReachabilityReindexSlack
+	}()
+
+	// Add three children to the genesis: leftBlock, centerBlock, rightBlock
+	leftBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+	centerBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+	rightBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
+
+	// Add a chain of reachabilityReindexWindow blocks above centerBlock.
+	// This will move the reindex root to centerBlock
+	centerTipHash := centerBlock.BlockHash()
+	for i := uint64(0); i < reachabilityReindexWindow; i++ {
+		block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{centerTipHash}, nil)
+		centerTipHash = block.BlockHash()
+	}
+
+	// Make sure that centerBlock is now the reindex root
+	centerTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(centerBlock.BlockHash())
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	if dag.reachabilityTree.reindexRoot != centerTreeNode {
+		t.Fatalf("centerBlock is not the reindex root after reindex")
+	}
+
+	// Get the current interval for leftBlock. The reindex should have
+	// resulted in a tight interval there
+	leftTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(leftBlock.BlockHash())
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	if leftTreeNode.interval.size() != 1 {
+		t.Fatalf("leftBlock interval not tight after reindex")
+	}
+
+	// Get the current interval for rightBlock. The reindex should have
+	// resulted in a tight interval there
+	rightTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(rightBlock.BlockHash())
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	if rightTreeNode.interval.size() != 1 {
+		t.Fatalf("rightBlock interval not tight after reindex")
+	}
+
+	// Get the current interval for centerBlock. Its interval should be:
+	// genesisInterval - 1 - leftInterval - leftSlack - rightInterval - rightSlack
+	genesisTreeNode, err := dag.reachabilityTree.store.treeNodeByBlockHash(dag.genesis.hash)
+	if err != nil {
+		t.Fatalf("failed to get tree node: %s", err)
+	}
+	expectedCenterInterval := genesisTreeNode.interval.size() - 1 -
+		leftTreeNode.interval.size() - reachabilityReindexSlack -
+		rightTreeNode.interval.size() - reachabilityReindexSlack
+	if centerTreeNode.interval.size() != expectedCenterInterval {
+		t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
+			expectedCenterInterval, centerTreeNode.interval.size())
+	}
+
+	// Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock.
+	// Each addition will trigger a lower-than-reindex-root reindex. We
+	// expect the centerInterval to shrink by 1 each time, but its child
+	// to remain unaffected
+	treeChildOfCenterBlock := centerTreeNode.children[0]
+	treeChildOfCenterBlockOriginalIntervalSize := treeChildOfCenterBlock.interval.size()
+	leftTipHash := leftBlock.BlockHash()
+	for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
+		block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{leftTipHash}, nil)
+		leftTipHash = block.BlockHash()
+
+		expectedCenterInterval--
+		if centerTreeNode.interval.size() != expectedCenterInterval {
+			t.Fatalf("unexpected centerBlock interval. Want: %d, got: %d",
+				expectedCenterInterval, centerTreeNode.interval.size())
+		}
+
+		if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize {
+			t.Fatalf("the interval of centerBlock's child unexpectedly changed")
+		}
+	}
+
+	// Add a chain of reachabilityReindexWindow - 1 blocks above rightBlock.
+	// Each addition will trigger a lower-than-reindex-root reindex. We
+	// expect the centerInterval to shrink by 1 each time, but its child
+	// to remain unaffected
+	rightTipHash := rightBlock.BlockHash()
+	for i := uint64(0); i < reachabilityReindexWindow-1; i++ {
+		block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{rightTipHash}, nil)
+		rightTipHash = block.BlockHash()
+
+		expectedCenterInterval--
+		if centerTreeNode.interval.size() != expectedCenterInterval {
+			t.Fatalf("unexpected centerBlock interval. 
Want: %d, got: %d", + expectedCenterInterval, centerTreeNode.interval.size()) + } + + if treeChildOfCenterBlock.interval.size() != treeChildOfCenterBlockOriginalIntervalSize { + t.Fatalf("the interval of centerBlock's child unexpectedly changed") + } + } +} diff --git a/blockdag/reachabilitystore.go b/blockdag/reachabilitystore.go index dea254784..064e82d6b 100644 --- a/blockdag/reachabilitystore.go +++ b/blockdag/reachabilitystore.go @@ -12,7 +12,7 @@ import ( type reachabilityData struct { treeNode *reachabilityTreeNode - futureCoveringSet futureCoveringBlockSet + futureCoveringSet futureCoveringTreeNodeSet } type reachabilityStore struct { @@ -41,11 +41,11 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) { store.setBlockAsDirty(node.hash) } -func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error { +func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringTreeNodeSet) error { // load the reachability data from DB to store.loaded _, exists := store.reachabilityDataByHash(node.hash) if !exists { - return reachabilityNotFoundError(node) + return reachabilityNotFoundError(node.hash) } store.loaded[*node.hash].futureCoveringSet = futureCoveringSet @@ -57,22 +57,26 @@ func (store *reachabilityStore) setBlockAsDirty(blockHash *daghash.Hash) { store.dirty[*blockHash] = struct{}{} } -func reachabilityNotFoundError(node *blockNode) error { - return errors.Errorf("Couldn't find reachability data for block %s", node.hash) +func reachabilityNotFoundError(hash *daghash.Hash) error { + return errors.Errorf("couldn't find reachability data for block %s", hash) } -func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) { - reachabilityData, exists := store.reachabilityDataByHash(node.hash) +func (store *reachabilityStore) treeNodeByBlockHash(hash *daghash.Hash) (*reachabilityTreeNode, error) { + reachabilityData, exists := store.reachabilityDataByHash(hash) if !exists { - return nil, reachabilityNotFoundError(node) + return nil, reachabilityNotFoundError(hash) } return reachabilityData.treeNode, nil } -func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringBlockSet, error) { +func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) { + return store.treeNodeByBlockHash(node.hash) +} + +func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringTreeNodeSet, error) { reachabilityData, exists := store.reachabilityDataByHash(node.hash) if !exists { - return nil, reachabilityNotFoundError(node) + return nil, reachabilityNotFoundError(node.hash) } return reachabilityData.futureCoveringSet, nil } @@ -215,12 +219,6 @@ func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachab return err } - // Serialize the remaining interval - err = store.serializeReachabilityInterval(w, treeNode.remainingInterval) - if err != nil { - return err - } - // Serialize the parent // If this is the genesis block, write the zero hash instead parentHash := &daghash.ZeroHash @@ -265,16 +263,16 @@ func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, inter return nil } -func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringBlockSet) error { +func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringTreeNodeSet) 
error { // Serialize the set size err := wire.WriteVarInt(w, uint64(len(futureCoveringSet))) if err != nil { return err } - // Serialize each block in the set - for _, block := range futureCoveringSet { - err = wire.WriteElement(w, block.blockNode.hash) + // Serialize each node in the set + for _, node := range futureCoveringSet { + err = wire.WriteElement(w, node.blockNode.hash) if err != nil { return err } @@ -311,13 +309,6 @@ func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *re } destination.treeNode.interval = interval - // Deserialize the remaining interval - remainingInterval, err := store.deserializeReachabilityInterval(r) - if err != nil { - return err - } - destination.treeNode.remainingInterval = remainingInterval - // Deserialize the parent // If this is the zero hash, this node is the genesis and as such doesn't have a parent parentHash := &daghash.Hash{} @@ -388,25 +379,18 @@ func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destin } // Deserialize each block in the set - futureCoveringSet := make(futureCoveringBlockSet, setSize) + futureCoveringSet := make(futureCoveringTreeNodeSet, setSize) for i := uint64(0); i < setSize; i++ { blockHash := &daghash.Hash{} err = wire.ReadElement(r, blockHash) if err != nil { return err } - blockNode, ok := store.dag.index.LookupNode(blockHash) - if !ok { - return errors.Errorf("blockNode not found for hash %s", blockHash) - } blockReachabilityData, ok := store.reachabilityDataByHash(blockHash) if !ok { return errors.Errorf("block reachability data not found for hash: %s", blockHash) } - futureCoveringSet[i] = &futureCoveringBlock{ - blockNode: blockNode, - treeNode: blockReachabilityData.treeNode, - } + futureCoveringSet[i] = blockReachabilityData.treeNode } destination.futureCoveringSet = futureCoveringSet diff --git a/blockdag/validate.go b/blockdag/validate.go index 7103986b1..18534a12e 100644 --- a/blockdag/validate.go +++ b/blockdag/validate.go @@ -709,7 +709,7 @@ func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents bloc continue } - isAncestorOf, err := dag.isAncestorOf(parentA, parentB) + isAncestorOf, err := dag.isInPast(parentA, parentB) if err != nil { return err } diff --git a/dbaccess/reachability.go b/dbaccess/reachability.go index 68401448e..dada75cfc 100644 --- a/dbaccess/reachability.go +++ b/dbaccess/reachability.go @@ -6,6 +6,7 @@ import ( ) var reachabilityDataBucket = database.MakeBucket([]byte("reachability")) +var reachabilityReindexKey = database.MakeBucket().Key([]byte("reachability-reindex-root")) func reachabilityKey(hash *daghash.Hash) *database.Key { return reachabilityDataBucket.Key(hash[:]) @@ -38,3 +39,26 @@ func StoreReachabilityData(context Context, blockHash *daghash.Hash, reachabilit func ClearReachabilityData(dbTx *TxContext) error { return clearBucket(dbTx, reachabilityDataBucket) } + +// StoreReachabilityReindexRoot stores the reachability reindex root in the database. +func StoreReachabilityReindexRoot(context Context, reachabilityReindexRoot *daghash.Hash) error { + accessor, err := context.accessor() + if err != nil { + return err + } + return accessor.Put(reachabilityReindexKey, reachabilityReindexRoot[:]) +} + +// FetchReachabilityReindexRoot retrieves the reachability reindex root from the database. +// Returns ErrNotFound if the state is missing from the database. 
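The storage pattern these two functions follow is a single well-known key holding the serialized reindex root, so the tree resumes from the same root after a restart. A minimal stand-in sketch; kvStore, its methods, and the key string below are assumptions for illustration, not the real dbaccess accessor API:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// kvStore stands in for the database accessor.
type kvStore map[string][]byte

func (s kvStore) put(key string, value []byte) { s[key] = value }

func (s kvStore) get(key string) ([]byte, error) {
	value, ok := s[key]
	if !ok {
		return nil, errNotFound // analogous to ErrNotFound above
	}
	return value, nil
}

func main() {
	db := kvStore{}
	hash := make([]byte, 32) // a zeroed 32-byte hash standing in for daghash.Hash
	db.put("reachability-reindex-root", hash)

	got, err := db.get("reachability-reindex-root")
	fmt.Println(len(got), err) // 32 <nil>
}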
+func FetchReachabilityReindexRoot(context Context) (*daghash.Hash, error) { + accessor, err := context.accessor() + if err != nil { + return nil, err + } + bytes, err := accessor.Get(reachabilityReindexKey) + if err != nil { + return nil, err + } + return daghash.NewHash(bytes) +} From ad096f978134dcc2c7f5305f8ffcf8423fd6a95e Mon Sep 17 00:00:00 2001 From: Mike Zak Date: Mon, 29 Jun 2020 08:59:56 +0300 Subject: [PATCH 73/77] Update to version 0.5.0 --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index bac602838..85c11187f 100644 --- a/version/version.go +++ b/version/version.go @@ -10,8 +10,8 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs const ( appMajor uint = 0 - appMinor uint = 4 - appPatch uint = 1 + appMinor uint = 5 + appPatch uint = 0 ) // appBuild is defined as a variable so it can be overridden during the build From 336347b3c518c4e164590e949ba6497597e37776 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Mon, 29 Jun 2020 12:13:51 +0300 Subject: [PATCH 74/77] [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root (#777) * [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root. * [NOD-1063] Convert modifiedTreeNode to an out param. * [NOD-1063] Fix a comment. * [NOD-1063] Rename a test. --- blockdag/reachability.go | 263 ++++++++++++++++------------------ blockdag/reachability_test.go | 51 +++++-- 2 files changed, 166 insertions(+), 148 deletions(-) diff --git a/blockdag/reachability.go b/blockdag/reachability.go index fa5a1ec2b..1b69cd4ae 100644 --- a/blockdag/reachability.go +++ b/blockdag/reachability.go @@ -40,14 +40,6 @@ func newModifiedTreeNodes(nodes ...*reachabilityTreeNode) modifiedTreeNodes { return modifiedNodes } -// addAll adds all the reachabilityTreeNodes in `other` -// into `mtn`. Note that `other` is not affected. -func (mtn modifiedTreeNodes) addAll(other modifiedTreeNodes) { - for node := range other { - mtn[node] = struct{}{} - } -} - // reachabilityInterval represents an interval to be used within the // tree reachability algorithm. See reachabilityTreeNode for further // details. @@ -267,8 +259,8 @@ func (rtn *reachabilityTreeNode) hasSlackIntervalAfter() bool { // remaining interval to allocate, a reindexing is triggered. // This method returns a list of reachabilityTreeNodes modified // by it. -func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRoot *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { remaining := rtn.remainingIntervalAfter() @@ -281,9 +273,9 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo // at this point we don't yet know child's interval. if !reindexRoot.isAncestorOf(rtn) { reindexStartTime := time.Now() - modifiedNodes, err := rtn.reindexIntervalsEarlierThanReindexRoot(reindexRoot) + err := rtn.reindexIntervalsEarlierThanReindexRoot(reindexRoot, modifiedNodes) if err != nil { - return nil, err + return err } reindexTimeElapsed := time.Since(reindexStartTime) log.Debugf("Reachability reindex triggered for "+ @@ -291,30 +283,32 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo "reindex root %s. 
Modified %d tree nodes and took %dms.", rtn.blockNode.hash, reindexRoot.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds()) - return modifiedNodes, nil + return nil } // No allocation space left -- reindex if remaining.size() == 0 { reindexStartTime := time.Now() - modifiedNodes, err := rtn.reindexIntervals() + err := rtn.reindexIntervals(modifiedNodes) if err != nil { - return nil, err + return err } reindexTimeElapsed := time.Since(reindexStartTime) log.Debugf("Reachability reindex triggered for "+ "block %s. Modified %d tree nodes and took %dms.", rtn.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds()) - return modifiedNodes, nil + return nil } // Allocate from the remaining space allocated, _, err := remaining.splitInHalf() if err != nil { - return nil, err + return err } child.interval = allocated - return newModifiedTreeNodes(rtn, child), nil + modifiedNodes[rtn] = struct{}{} + modifiedNodes[child] = struct{}{} + return nil } // reindexIntervals traverses the reachability subtree that's @@ -324,7 +318,7 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo // tree until it finds a node with a subreeSize that's greater than // its interval size. See propagateInterval for further details. // This method returns a list of reachabilityTreeNodes modified by it. -func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervals(modifiedNodes modifiedTreeNodes) error { current := rtn // Initial interval and subtree sizes @@ -338,7 +332,7 @@ func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { if current.parent == nil { // If we ended up here it means that there are more // than 2^64 blocks, which shouldn't ever happen. - return nil, errors.Errorf("missing tree " + + return errors.Errorf("missing tree " + "parent during reindexing. Theoretically, this " + "should only ever happen if there are more " + "than 2^64 blocks in the DAG.") @@ -350,7 +344,7 @@ func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { } // Propagate the interval to the subtree - return current.propagateInterval(subTreeSizeMap) + return current.propagateInterval(subTreeSizeMap, modifiedNodes) } // countSubtrees counts the size of each subtree under this node, @@ -415,10 +409,9 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT // Subtree intervals are recursively allocated according to subtree sizes and // the allocation rule in splitWithExponentialBias. This method returns // a list of reachabilityTreeNodes modified by it. 
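The signature change running through this whole diff is the out-parameter pattern: instead of every helper returning its own modifiedTreeNodes set and every caller merging them with addAll, one set is threaded down the call tree and written into in place. A self-contained sketch of the pattern; node, modifiedSet, and the touch helpers are illustrative names, not the kaspad types:

package main

import "fmt"

type node struct{ name string }

// modifiedSet plays the role of modifiedTreeNodes: a set threaded
// through the call tree as an out-parameter, so nested helpers record
// what they touched without each returning its own set.
type modifiedSet map[*node]struct{}

func touch(n *node, modified modifiedSet) {
	// ... mutate n here ...
	modified[n] = struct{}{}
}

func touchMany(nodes []*node, modified modifiedSet) {
	for _, n := range nodes {
		touch(n, modified) // no per-call set allocation, no addAll merge
	}
}

func main() {
	a, b := &node{"a"}, &node{"b"}
	modified := modifiedSet{}
	touchMany([]*node{a, b, a}, modified)
	fmt.Println(len(modified)) // 2: the set deduplicates repeated touches
}

The win is one allocation per top-level operation and free deduplication when several sub-calls touch the same node; the cost is that every signature on the path has to carry the extra parameter.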
-func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64, + modifiedNodes modifiedTreeNodes) error { - allModifiedTreeNodes := newModifiedTreeNodes() queue := []*reachabilityTreeNode{rtn} for len(queue) > 0 { var current *reachabilityTreeNode @@ -430,7 +423,7 @@ func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabil } intervals, err := current.intervalRangeForChildAllocation().splitWithExponentialBias(sizes) if err != nil { - return nil, err + return err } for i, child := range current.children { childInterval := intervals[i] @@ -439,13 +432,13 @@ func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabil } } - allModifiedTreeNodes[current] = struct{}{} + modifiedNodes[current] = struct{}{} } - return allModifiedTreeNodes, nil + return nil } func (rtn *reachabilityTreeNode) reindexIntervalsEarlierThanReindexRoot( - reindexRoot *reachabilityTreeNode) (modifiedTreeNodes, error) { + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { // Find the common ancestor for both rtn and the reindex root commonAncestor := rtn.findCommonAncestorWithReindexRoot(reindexRoot) @@ -455,25 +448,27 @@ func (rtn *reachabilityTreeNode) reindexIntervalsEarlierThanReindexRoot( // b. A reachability tree ancestor of `reindexRoot` commonAncestorChosenChild, err := commonAncestor.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } if rtn.interval.end < commonAncestorChosenChild.interval.start { // rtn is in the subtree before the chosen child - return rtn.reclaimIntervalBeforeChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) + return rtn.reclaimIntervalBeforeChosenChild(commonAncestor, + commonAncestorChosenChild, reindexRoot, modifiedNodes) } - if commonAncestorChosenChild.interval.end < rtn.interval.start { - // rtn is in the subtree after the chosen child - return rtn.reclaimIntervalAfterChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) - } - return nil, errors.Errorf("rtn is in the chosen child's subtree") + + // rtn is either: + // * in the subtree after the chosen child + // * the common ancestor + // In both cases we reclaim from the "after" subtree. 
In the + // latter case this is arbitrary + return rtn.reclaimIntervalAfterChosenChild(commonAncestor, + commonAncestorChosenChild, reindexRoot, modifiedNodes) } func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( - commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { current := commonAncestorChosenChild if !commonAncestorChosenChild.hasSlackIntervalBefore() { @@ -484,7 +479,7 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( var err error current, err = current.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } } @@ -497,11 +492,10 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( current.interval.start+slackReachabilityIntervalForReclaiming, current.interval.end, ) - modifiedNodes, err := current.countSubtreesAndPropagateInterval() + err := current.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current.interval = originalInterval } } @@ -516,25 +510,24 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( current.interval.start+slackReachabilityIntervalForReclaiming, current.interval.end, ) - modifiedNodes, err := current.parent.reindexIntervalsBeforeNode(current) + err := current.parent.reindexIntervalsBeforeNode(current, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current = current.parent } - return allModifiedTreeNodes, nil + return nil } // reindexIntervalsBeforeNode applies a tight interval to the reachability // subtree before `node`. Note that `node` itself is unaffected. -func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { childrenBeforeNode, _, err := rtn.splitChildrenAroundChild(node) if err != nil { - return nil, err + return err } childrenBeforeNodeSizes, childrenBeforeNodeSubtreeSizeMaps, childrenBeforeNodeSizesSum := @@ -545,16 +538,15 @@ func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTr newInterval := newReachabilityInterval(newIntervalEnd-childrenBeforeNodeSizesSum+1, newIntervalEnd) intervals, err := newInterval.splitExact(childrenBeforeNodeSizes) if err != nil { - return nil, err + return err } - return orderedTreeNodeSet(childrenBeforeNode).propagateIntervals(intervals, childrenBeforeNodeSubtreeSizeMaps) + return orderedTreeNodeSet(childrenBeforeNode). 
+ propagateIntervals(intervals, childrenBeforeNodeSubtreeSizeMaps, modifiedNodes) } func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( - commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { current := commonAncestorChosenChild if !commonAncestorChosenChild.hasSlackIntervalAfter() { @@ -565,7 +557,7 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( var err error current, err = current.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } } @@ -578,11 +570,12 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( current.interval.start, current.interval.end-slackReachabilityIntervalForReclaiming, ) - modifiedNodes, err := current.countSubtreesAndPropagateInterval() + modifiedNodes[current] = struct{}{} + + err := current.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current.interval = originalInterval } } @@ -597,25 +590,26 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( current.interval.start, current.interval.end-slackReachabilityIntervalForReclaiming, ) - modifiedNodes, err := current.parent.reindexIntervalsAfterNode(current) + modifiedNodes[current] = struct{}{} + + err := current.parent.reindexIntervalsAfterNode(current, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current = current.parent } - return allModifiedTreeNodes, nil + return nil } // reindexIntervalsAfterNode applies a tight interval to the reachability // subtree after `node`. Note that `node` itself is unaffected. -func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { _, childrenAfterNode, err := rtn.splitChildrenAroundChild(node) if err != nil { - return nil, err + return err } childrenAfterNodeSizes, childrenAfterNodeSubtreeSizeMaps, childrenAfterNodeSizesSum := @@ -626,25 +620,24 @@ func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTre newInterval := newReachabilityInterval(newIntervalStart, newIntervalStart+childrenAfterNodeSizesSum-1) intervals, err := newInterval.splitExact(childrenAfterNodeSizes) if err != nil { - return nil, err + return err } - return orderedTreeNodeSet(childrenAfterNode).propagateIntervals(intervals, childrenAfterNodeSubtreeSizeMaps) + return orderedTreeNodeSet(childrenAfterNode). 
+ propagateIntervals(intervals, childrenAfterNodeSubtreeSizeMaps, modifiedNodes) } func (tns orderedTreeNodeSet) propagateIntervals(intervals []*reachabilityInterval, - subtreeSizeMaps []map[*reachabilityTreeNode]uint64) (modifiedTreeNodes, error) { + subtreeSizeMaps []map[*reachabilityTreeNode]uint64, modifiedNodes modifiedTreeNodes) error { - allModifiedTreeNodes := newModifiedTreeNodes() for i, node := range tns { node.interval = intervals[i] subtreeSizeMap := subtreeSizeMaps[i] - modifiedNodes, err := node.propagateInterval(subtreeSizeMap) + err := node.propagateInterval(subtreeSizeMap, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } - return allModifiedTreeNodes, nil + return nil } // isAncestorOf checks if this node is a reachability tree ancestor @@ -832,7 +825,8 @@ func (rt *reachabilityTree) addBlock(node *blockNode, selectedParentAnticone []* if err != nil { return err } - modifiedNodes, err := selectedParentTreeNode.addChild(newTreeNode, rt.reindexRoot) + modifiedNodes := newModifiedTreeNodes() + err = selectedParentTreeNode.addChild(newTreeNode, rt.reindexRoot, modifiedNodes) if err != nil { return err } @@ -861,7 +855,8 @@ func (rt *reachabilityTree) addBlock(node *blockNode, selectedParentAnticone []* // at this stage the virtual had not yet been updated. if node.blueScore > rt.dag.SelectedTipBlueScore() { updateStartTime := time.Now() - modifiedNodes, err := rt.updateReindexRoot(newTreeNode) + modifiedNodes := newModifiedTreeNodes() + err := rt.updateReindexRoot(newTreeNode, modifiedNodes) if err != nil { return err } @@ -941,48 +936,47 @@ func (rt *reachabilityTree) storeState(dbTx *dbaccess.TxContext) error { return nil } -func (rt *reachabilityTree) updateReindexRoot(newTreeNode *reachabilityTreeNode) (modifiedTreeNodes, error) { - allModifiedTreeNodes := newModifiedTreeNodes() +func (rt *reachabilityTree) updateReindexRoot(newTreeNode *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { nextReindexRoot := rt.reindexRoot for { - candidateReindexRoot, modifiedNodes, found, err := rt.maybeMoveReindexRoot(nextReindexRoot, newTreeNode) + candidateReindexRoot, found, err := rt.maybeMoveReindexRoot(nextReindexRoot, newTreeNode, modifiedNodes) if err != nil { - return nil, err + return err } if !found { break } - allModifiedTreeNodes.addAll(modifiedNodes) nextReindexRoot = candidateReindexRoot } rt.reindexRoot = nextReindexRoot - return allModifiedTreeNodes, nil + return nil } func (rt *reachabilityTree) maybeMoveReindexRoot( - reindexRoot *reachabilityTreeNode, newTreeNode *reachabilityTreeNode) ( - newReindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes, found bool, err error) { + reindexRoot *reachabilityTreeNode, newTreeNode *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) ( + newReindexRoot *reachabilityTreeNode, found bool, err error) { if !reindexRoot.isAncestorOf(newTreeNode) { commonAncestor := newTreeNode.findCommonAncestorWithReindexRoot(reindexRoot) - return commonAncestor, nil, true, nil + return commonAncestor, true, nil } reindexRootChosenChild, err := reindexRoot.findAncestorAmongChildren(newTreeNode) if err != nil { - return nil, nil, false, err + return nil, false, err } if newTreeNode.blockNode.blueScore-reindexRootChosenChild.blockNode.blueScore < reachabilityReindexWindow { - return nil, nil, false, nil + return nil, false, nil } - modifiedNodes, err = rt.concentrateIntervalAroundReindexRootChosenChild(reindexRoot, reindexRootChosenChild) + err = 
rt.concentrateIntervalAroundReindexRootChosenChild(reindexRoot, reindexRootChosenChild, modifiedNodes) if err != nil { - return nil, nil, false, err + return nil, false, err } - return reindexRootChosenChild, modifiedNodes, true, nil + return reindexRootChosenChild, true, nil } // findAncestorAmongChildren finds the reachability tree child @@ -997,39 +991,34 @@ func (rtn *reachabilityTreeNode) findAncestorAmongChildren(node *reachabilityTre } func (rt *reachabilityTree) concentrateIntervalAroundReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { reindexRootChildNodesBeforeChosen, reindexRootChildNodesAfterChosen, err := reindexRoot.splitChildrenAroundChild(reindexRootChosenChild) if err != nil { - return nil, err + return err } - reindexRootChildNodesBeforeChosenSizesSum, modifiedNodesBeforeChosen, err := - rt.tightenIntervalsBeforeReindexRootChosenChild(reindexRoot, reindexRootChildNodesBeforeChosen) + reindexRootChildNodesBeforeChosenSizesSum, err := + rt.tightenIntervalsBeforeReindexRootChosenChild(reindexRoot, reindexRootChildNodesBeforeChosen, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesBeforeChosen) - reindexRootChildNodesAfterChosenSizesSum, modifiedNodesAfterChosen, err := - rt.tightenIntervalsAfterReindexRootChosenChild(reindexRoot, reindexRootChildNodesAfterChosen) + reindexRootChildNodesAfterChosenSizesSum, err := + rt.tightenIntervalsAfterReindexRootChosenChild(reindexRoot, reindexRootChildNodesAfterChosen, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesAfterChosen) - modifiedNodesForReindexRootExpansion, err := rt.expandIntervalInReindexRootChosenChild( - reindexRoot, reindexRootChosenChild, reindexRootChildNodesBeforeChosenSizesSum, reindexRootChildNodesAfterChosenSizesSum) + err = rt.expandIntervalInReindexRootChosenChild(reindexRoot, reindexRootChosenChild, + reindexRootChildNodesBeforeChosenSizesSum, reindexRootChildNodesAfterChosenSizesSum, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesForReindexRootExpansion) - return allModifiedTreeNodes, nil + return nil } // splitChildrenAroundChild splits `rtn` into two slices: the nodes that are before @@ -1046,8 +1035,8 @@ func (rtn *reachabilityTreeNode) splitChildrenAroundChild(child *reachabilityTre } func (rt *reachabilityTree) tightenIntervalsBeforeReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChildNodesBeforeChosen []*reachabilityTreeNode) ( - reindexRootChildNodesBeforeChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + reindexRoot *reachabilityTreeNode, reindexRootChildNodesBeforeChosen []*reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) (reindexRootChildNodesBeforeChosenSizesSum uint64, err error) { reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps, reindexRootChildNodesBeforeChosenSizesSum := calcReachabilityTreeNodeSizes(reindexRootChildNodesBeforeChosen) @@ -1057,17 +1046,17 @@ func (rt *reachabilityTree) tightenIntervalsBeforeReindexRootChosenChild( reindexRoot.interval.start+reachabilityReindexSlack+reindexRootChildNodesBeforeChosenSizesSum-1, ) - modifiedNodes, err = 
rt.propagateChildIntervals(intervalBeforeReindexRootStart, reindexRootChildNodesBeforeChosen, - reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps) + err = rt.propagateChildIntervals(intervalBeforeReindexRootStart, reindexRootChildNodesBeforeChosen, + reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps, modifiedNodes) if err != nil { - return 0, nil, err + return 0, err } - return reindexRootChildNodesBeforeChosenSizesSum, modifiedNodes, nil + return reindexRootChildNodesBeforeChosenSizesSum, nil } func (rt *reachabilityTree) tightenIntervalsAfterReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChildNodesAfterChosen []*reachabilityTreeNode) ( - reindexRootChildNodesAfterChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + reindexRoot *reachabilityTreeNode, reindexRootChildNodesAfterChosen []*reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) (reindexRootChildNodesAfterChosenSizesSum uint64, err error) { reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps, reindexRootChildNodesAfterChosenSizesSum := calcReachabilityTreeNodeSizes(reindexRootChildNodesAfterChosen) @@ -1077,19 +1066,17 @@ func (rt *reachabilityTree) tightenIntervalsAfterReindexRootChosenChild( reindexRoot.interval.end-reachabilityReindexSlack-1, ) - modifiedNodes, err = rt.propagateChildIntervals(intervalAfterReindexRootEnd, reindexRootChildNodesAfterChosen, - reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps) + err = rt.propagateChildIntervals(intervalAfterReindexRootEnd, reindexRootChildNodesAfterChosen, + reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps, modifiedNodes) if err != nil { - return 0, nil, err + return 0, err } - return reindexRootChildNodesAfterChosenSizesSum, modifiedNodes, nil + return reindexRootChildNodesAfterChosenSizesSum, nil } func (rt *reachabilityTree) expandIntervalInReindexRootChosenChild(reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode, reindexRootChildNodesBeforeChosenSizesSum uint64, - reindexRootChildNodesAfterChosenSizesSum uint64) (modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + reindexRootChildNodesAfterChosenSizesSum uint64, modifiedNodes modifiedTreeNodes) error { newReindexRootChildInterval := newReachabilityInterval( reindexRoot.interval.start+reindexRootChildNodesBeforeChosenSizesSum+reachabilityReindexSlack, @@ -1110,22 +1097,21 @@ func (rt *reachabilityTree) expandIntervalInReindexRootChosenChild(reindexRoot * newReindexRootChildInterval.start+reachabilityReindexSlack, newReindexRootChildInterval.end-reachabilityReindexSlack, ) - modifiedNodes, err := reindexRootChosenChild.countSubtreesAndPropagateInterval() + err := reindexRootChosenChild.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } reindexRootChosenChild.interval = newReindexRootChildInterval - allModifiedTreeNodes[reindexRootChosenChild] = struct{}{} - return allModifiedTreeNodes, nil + modifiedNodes[reindexRootChosenChild] = struct{}{} + return nil } -func (rtn *reachabilityTreeNode) countSubtreesAndPropagateInterval() (modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) countSubtreesAndPropagateInterval(modifiedNodes modifiedTreeNodes) error { subtreeSizeMap := make(map[*reachabilityTreeNode]uint64) 
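	// Two-phase reindex: countSubtrees first walks the subtree rooted at
	// rtn to learn every node's subtree size, and propagateInterval then
	// walks top-down, handing each child a sub-interval sized to fit its
	// subtree and recording every node whose interval changed into
	// modifiedNodes.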
rtn.countSubtrees(subtreeSizeMap) - return rtn.propagateInterval(subtreeSizeMap) + return rtn.propagateInterval(subtreeSizeMap, modifiedNodes) } func calcReachabilityTreeNodeSizes(treeNodes []*reachabilityTreeNode) ( @@ -1146,14 +1132,12 @@ func calcReachabilityTreeNodeSizes(treeNodes []*reachabilityTreeNode) ( } func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterval, - childNodes []*reachabilityTreeNode, sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + childNodes []*reachabilityTreeNode, sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64, + modifiedNodes modifiedTreeNodes) error { childIntervalSizes, err := interval.splitExact(sizes) if err != nil { - return nil, err + return err } for i, child := range childNodes { @@ -1161,14 +1145,13 @@ func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterv child.interval = childInterval childSubtreeSizeMap := subtreeSizeMaps[i] - modifiedNodes, err := child.propagateInterval(childSubtreeSizeMap) + err := child.propagateInterval(childSubtreeSizeMap, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } - return allModifiedTreeNodes, nil + return nil } // isInPast returns true if `this` is in the past (exclusive) of `other` diff --git a/blockdag/reachability_test.go b/blockdag/reachability_test.go index 47ad98bcc..07e03791a 100644 --- a/blockdag/reachability_test.go +++ b/blockdag/reachability_test.go @@ -19,7 +19,8 @@ func TestAddChild(t *testing.T) { currentTip := root for i := 0; i < 6; i++ { node := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(node, root) + modifiedNodes := newModifiedTreeNodes() + err := currentTip.addChild(node, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -36,7 +37,8 @@ func TestAddChild(t *testing.T) { // Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128) lastChild := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(lastChild, root) + modifiedNodes := newModifiedTreeNodes() + err := currentTip.addChild(lastChild, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -80,7 +82,8 @@ func TestAddChild(t *testing.T) { childNodes := make([]*reachabilityTreeNode, 6) for i := 0; i < len(childNodes); i++ { childNodes[i] = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := root.addChild(childNodes[i], root) + modifiedNodes := newModifiedTreeNodes() + err := root.addChild(childNodes[i], root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -95,7 +98,8 @@ func TestAddChild(t *testing.T) { // Add another node to the root to trigger a reindex (100 < 2^7=128) lastChild = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err = root.addChild(lastChild, root) + modifiedNodes = newModifiedTreeNodes() + err = root.addChild(lastChild, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -135,7 +139,7 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { descendants := make([]*reachabilityTreeNode, numberOfDescendants) for i := 0; i < numberOfDescendants; i++ { node := newReachabilityTreeNode(&blockNode{}) - _, err := currentTip.addChild(node, root) + err := currentTip.addChild(node, root, newModifiedTreeNodes()) if err != nil { 
t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err) } @@ -682,7 +686,7 @@ func TestReindexIntervalErrors(t *testing.T) { currentTreeNode := treeNode for i := 0; i < 100; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err = currentTreeNode.addChild(childTreeNode, treeNode) + err = currentTreeNode.addChild(childTreeNode, treeNode, newModifiedTreeNodes()) if err != nil { break } @@ -719,7 +723,7 @@ func BenchmarkReindexInterval(b *testing.B) { currentTreeNode := root for i := 0; i < subTreeSize; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err := currentTreeNode.addChild(childTreeNode, root) + err := currentTreeNode.addChild(childTreeNode, root, newModifiedTreeNodes()) if err != nil { b.Fatalf("addChild: %s", err) } @@ -732,7 +736,7 @@ func BenchmarkReindexInterval(b *testing.B) { // node should lead to a reindex from root. fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{}) b.StartTimer() - _, err := currentTreeNode.addChild(fullReindexTriggeringNode, root) + err := currentTreeNode.addChild(fullReindexTriggeringNode, root, newModifiedTreeNodes()) b.StopTimer() if err != nil { b.Fatalf("addChild: %s", err) @@ -818,6 +822,37 @@ func TestIsInPast(t *testing.T) { } } +func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) { + // Create a new database and DAG instance to run tests against. + dag, teardownFunc, err := DAGSetup("TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot", + true, Config{DAGParams: &dagconfig.SimnetParams}) + if err != nil { + t.Fatalf("Failed to setup DAG instance: %v", err) + } + defer teardownFunc() + + // Set the reindex window to a low number to make this test run fast + originalReachabilityReindexWindow := reachabilityReindexWindow + reachabilityReindexWindow = 10 + defer func() { + reachabilityReindexWindow = originalReachabilityReindexWindow + }() + + // Add a block on top of the genesis block + chainRootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) + + // Add chain of reachabilityReindexWindow blocks above chainRootBlock. + // This should move the reindex root + chainRootBlockTipHash := chainRootBlock.BlockHash() + for i := uint64(0); i < reachabilityReindexWindow; i++ { + chainBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chainRootBlockTipHash}, nil) + chainRootBlockTipHash = chainBlock.BlockHash() + } + + // Add another block over genesis + PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) +} + func TestUpdateReindexRoot(t *testing.T) { // Create a new database and DAG instance to run tests against. dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{ From e87d00c9cf4c71180031bd53196b3e4d5e56f804 Mon Sep 17 00:00:00 2001 From: Mike Zak Date: Mon, 29 Jun 2020 12:16:47 +0300 Subject: [PATCH 75/77] [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root commit e303efef4209b8d62f1aac2cb57ac79829411556 Author: stasatdaglabs Date: Mon Jun 29 11:59:36 2020 +0300 [NOD-1063] Rename a test. commit bfecd57470ec8aeb0a1b0ef82c051dde364c536e Author: stasatdaglabs Date: Mon Jun 29 11:57:36 2020 +0300 [NOD-1063] Fix a comment. commit b969e5922da16a3734806c03075fe2e45e64958b Author: stasatdaglabs Date: Sun Jun 28 18:14:44 2020 +0300 [NOD-1063] Convert modifiedTreeNode to an out param. 
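The case analysis at the heart of this fix fits in a few lines. The sketch below uses illustrative names (interval, reclaimSide); the real logic is the reindexIntervalsEarlierThanReindexRoot hunk repeated in the diff below. Before the fix, a node whose interval was neither strictly before nor strictly after the chosen child, that is, the common ancestor itself, fell through to an error:

package main

import "fmt"

type interval struct{ start, end uint64 }

// reclaimSide decides which side of the common ancestor's chosen child
// to reclaim slack from when a node earlier than the reindex root
// needs an interval.
func reclaimSide(rtn, chosenChild interval) string {
	switch {
	case rtn.end < chosenChild.start:
		return "before" // rtn sits in the subtree before the chosen child
	case chosenChild.end < rtn.start:
		return "after" // rtn sits in the subtree after the chosen child
	default:
		// rtn is the common ancestor itself; reclaiming from the
		// "after" side is an arbitrary but safe choice.
		return "after"
	}
}

func main() {
	chosen := interval{50, 60}
	fmt.Println(reclaimSide(interval{10, 20}, chosen)) // before
	fmt.Println(reclaimSide(interval{70, 80}, chosen)) // after
	fmt.Println(reclaimSide(interval{0, 100}, chosen)) // after (the fixed case)
}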
commit 170f9872f432b2f7177cb099149c9959031e4f1e Author: stasatdaglabs Date: Sun Jun 28 17:05:01 2020 +0300 [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root. --- blockdag/reachability.go | 263 ++++++++++++++++------------------ blockdag/reachability_test.go | 51 +++++-- 2 files changed, 166 insertions(+), 148 deletions(-) diff --git a/blockdag/reachability.go b/blockdag/reachability.go index fa5a1ec2b..1b69cd4ae 100644 --- a/blockdag/reachability.go +++ b/blockdag/reachability.go @@ -40,14 +40,6 @@ func newModifiedTreeNodes(nodes ...*reachabilityTreeNode) modifiedTreeNodes { return modifiedNodes } -// addAll adds all the reachabilityTreeNodes in `other` -// into `mtn`. Note that `other` is not affected. -func (mtn modifiedTreeNodes) addAll(other modifiedTreeNodes) { - for node := range other { - mtn[node] = struct{}{} - } -} - // reachabilityInterval represents an interval to be used within the // tree reachability algorithm. See reachabilityTreeNode for further // details. @@ -267,8 +259,8 @@ func (rtn *reachabilityTreeNode) hasSlackIntervalAfter() bool { // remaining interval to allocate, a reindexing is triggered. // This method returns a list of reachabilityTreeNodes modified // by it. -func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRoot *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { remaining := rtn.remainingIntervalAfter() @@ -281,9 +273,9 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo // at this point we don't yet know child's interval. if !reindexRoot.isAncestorOf(rtn) { reindexStartTime := time.Now() - modifiedNodes, err := rtn.reindexIntervalsEarlierThanReindexRoot(reindexRoot) + err := rtn.reindexIntervalsEarlierThanReindexRoot(reindexRoot, modifiedNodes) if err != nil { - return nil, err + return err } reindexTimeElapsed := time.Since(reindexStartTime) log.Debugf("Reachability reindex triggered for "+ @@ -291,30 +283,32 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo "reindex root %s. Modified %d tree nodes and took %dms.", rtn.blockNode.hash, reindexRoot.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds()) - return modifiedNodes, nil + return nil } // No allocation space left -- reindex if remaining.size() == 0 { reindexStartTime := time.Now() - modifiedNodes, err := rtn.reindexIntervals() + err := rtn.reindexIntervals(modifiedNodes) if err != nil { - return nil, err + return err } reindexTimeElapsed := time.Since(reindexStartTime) log.Debugf("Reachability reindex triggered for "+ "block %s. Modified %d tree nodes and took %dms.", rtn.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds()) - return modifiedNodes, nil + return nil } // Allocate from the remaining space allocated, _, err := remaining.splitInHalf() if err != nil { - return nil, err + return err } child.interval = allocated - return newModifiedTreeNodes(rtn, child), nil + modifiedNodes[rtn] = struct{}{} + modifiedNodes[child] = struct{}{} + return nil } // reindexIntervals traverses the reachability subtree that's @@ -324,7 +318,7 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode, reindexRo // tree until it finds a node with a subreeSize that's greater than // its interval size. See propagateInterval for further details. 
// This method returns a list of reachabilityTreeNodes modified by it. -func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervals(modifiedNodes modifiedTreeNodes) error { current := rtn // Initial interval and subtree sizes @@ -338,7 +332,7 @@ func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { if current.parent == nil { // If we ended up here it means that there are more // than 2^64 blocks, which shouldn't ever happen. - return nil, errors.Errorf("missing tree " + + return errors.Errorf("missing tree " + "parent during reindexing. Theoretically, this " + "should only ever happen if there are more " + "than 2^64 blocks in the DAG.") @@ -350,7 +344,7 @@ func (rtn *reachabilityTreeNode) reindexIntervals() (modifiedTreeNodes, error) { } // Propagate the interval to the subtree - return current.propagateInterval(subTreeSizeMap) + return current.propagateInterval(subTreeSizeMap, modifiedNodes) } // countSubtrees counts the size of each subtree under this node, @@ -415,10 +409,9 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT // Subtree intervals are recursively allocated according to subtree sizes and // the allocation rule in splitWithExponentialBias. This method returns // a list of reachabilityTreeNodes modified by it. -func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64, + modifiedNodes modifiedTreeNodes) error { - allModifiedTreeNodes := newModifiedTreeNodes() queue := []*reachabilityTreeNode{rtn} for len(queue) > 0 { var current *reachabilityTreeNode @@ -430,7 +423,7 @@ func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabil } intervals, err := current.intervalRangeForChildAllocation().splitWithExponentialBias(sizes) if err != nil { - return nil, err + return err } for i, child := range current.children { childInterval := intervals[i] @@ -439,13 +432,13 @@ func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabil } } - allModifiedTreeNodes[current] = struct{}{} + modifiedNodes[current] = struct{}{} } - return allModifiedTreeNodes, nil + return nil } func (rtn *reachabilityTreeNode) reindexIntervalsEarlierThanReindexRoot( - reindexRoot *reachabilityTreeNode) (modifiedTreeNodes, error) { + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { // Find the common ancestor for both rtn and the reindex root commonAncestor := rtn.findCommonAncestorWithReindexRoot(reindexRoot) @@ -455,25 +448,27 @@ func (rtn *reachabilityTreeNode) reindexIntervalsEarlierThanReindexRoot( // b. 
A reachability tree ancestor of `reindexRoot` commonAncestorChosenChild, err := commonAncestor.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } if rtn.interval.end < commonAncestorChosenChild.interval.start { // rtn is in the subtree before the chosen child - return rtn.reclaimIntervalBeforeChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) + return rtn.reclaimIntervalBeforeChosenChild(commonAncestor, + commonAncestorChosenChild, reindexRoot, modifiedNodes) } - if commonAncestorChosenChild.interval.end < rtn.interval.start { - // rtn is in the subtree after the chosen child - return rtn.reclaimIntervalAfterChosenChild(commonAncestor, commonAncestorChosenChild, reindexRoot) - } - return nil, errors.Errorf("rtn is in the chosen child's subtree") + + // rtn is either: + // * in the subtree after the chosen child + // * the common ancestor + // In both cases we reclaim from the "after" subtree. In the + // latter case this is arbitrary + return rtn.reclaimIntervalAfterChosenChild(commonAncestor, + commonAncestorChosenChild, reindexRoot, modifiedNodes) } func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( - commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { current := commonAncestorChosenChild if !commonAncestorChosenChild.hasSlackIntervalBefore() { @@ -484,7 +479,7 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( var err error current, err = current.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } } @@ -497,11 +492,10 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( current.interval.start+slackReachabilityIntervalForReclaiming, current.interval.end, ) - modifiedNodes, err := current.countSubtreesAndPropagateInterval() + err := current.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current.interval = originalInterval } } @@ -516,25 +510,24 @@ func (rtn *reachabilityTreeNode) reclaimIntervalBeforeChosenChild( current.interval.start+slackReachabilityIntervalForReclaiming, current.interval.end, ) - modifiedNodes, err := current.parent.reindexIntervalsBeforeNode(current) + err := current.parent.reindexIntervalsBeforeNode(current, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current = current.parent } - return allModifiedTreeNodes, nil + return nil } // reindexIntervalsBeforeNode applies a tight interval to the reachability // subtree before `node`. Note that `node` itself is unaffected. 
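
The slack-reclaiming loops above repeatedly call countSubtreesAndPropagateInterval, a two-phase operation: first measure every subtree, then hand out intervals top-down according to those measurements. The following is a simplified recursive sketch under assumed minimal types; the kaspad versions are iterative, reserve allocation space differently, and split remaining space with an exponential bias.

    package main

    import "fmt"

    type interval struct{ start, end uint64 }

    type treeNode struct {
        interval interval
        children []*treeNode
    }

    // countSubtrees returns the node count of n's subtree and records
    // every subtree size it computes along the way.
    func countSubtrees(n *treeNode, sizes map[*treeNode]uint64) uint64 {
        total := uint64(1)
        for _, c := range n.children {
            total += countSubtrees(c, sizes)
        }
        sizes[n] = total
        return total
    }

    // propagateInterval assigns each child a consecutive chunk of the
    // parent's interval sized to its subtree, then recurses.
    // Simplified sketch; not the kaspad implementation.
    func propagateInterval(n *treeNode, sizes map[*treeNode]uint64) {
        start := n.interval.start
        for _, c := range n.children {
            c.interval = interval{start, start + sizes[c] - 1}
            start += sizes[c]
            propagateInterval(c, sizes)
        }
    }

    func main() {
        leaf := &treeNode{}
        root := &treeNode{interval: interval{1, 100}, children: []*treeNode{leaf}}
        sizes := make(map[*treeNode]uint64)
        countSubtrees(root, sizes)
        propagateInterval(root, sizes)
        fmt.Println(leaf.interval) // prints {1 1}
    }
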
-func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { childrenBeforeNode, _, err := rtn.splitChildrenAroundChild(node) if err != nil { - return nil, err + return err } childrenBeforeNodeSizes, childrenBeforeNodeSubtreeSizeMaps, childrenBeforeNodeSizesSum := @@ -545,16 +538,15 @@ func (rtn *reachabilityTreeNode) reindexIntervalsBeforeNode(node *reachabilityTr newInterval := newReachabilityInterval(newIntervalEnd-childrenBeforeNodeSizesSum+1, newIntervalEnd) intervals, err := newInterval.splitExact(childrenBeforeNodeSizes) if err != nil { - return nil, err + return err } - return orderedTreeNodeSet(childrenBeforeNode).propagateIntervals(intervals, childrenBeforeNodeSubtreeSizeMaps) + return orderedTreeNodeSet(childrenBeforeNode). + propagateIntervals(intervals, childrenBeforeNodeSubtreeSizeMaps, modifiedNodes) } func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( - commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, reindexRoot *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + commonAncestor *reachabilityTreeNode, commonAncestorChosenChild *reachabilityTreeNode, + reindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) error { current := commonAncestorChosenChild if !commonAncestorChosenChild.hasSlackIntervalAfter() { @@ -565,7 +557,7 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( var err error current, err = current.findAncestorAmongChildren(reindexRoot) if err != nil { - return nil, err + return err } } @@ -578,11 +570,12 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( current.interval.start, current.interval.end-slackReachabilityIntervalForReclaiming, ) - modifiedNodes, err := current.countSubtreesAndPropagateInterval() + modifiedNodes[current] = struct{}{} + + err := current.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current.interval = originalInterval } } @@ -597,25 +590,26 @@ func (rtn *reachabilityTreeNode) reclaimIntervalAfterChosenChild( current.interval.start, current.interval.end-slackReachabilityIntervalForReclaiming, ) - modifiedNodes, err := current.parent.reindexIntervalsAfterNode(current) + modifiedNodes[current] = struct{}{} + + err := current.parent.reindexIntervalsAfterNode(current, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) current = current.parent } - return allModifiedTreeNodes, nil + return nil } // reindexIntervalsAfterNode applies a tight interval to the reachability // subtree after `node`. Note that `node` itself is unaffected. 
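
Both reindexIntervalsBeforeNode above and reindexIntervalsAfterNode below size a fresh interval to the children's total and then call splitExact, which this diff does not show. As a reference, here is a self-contained sketch of an exact split over closed [start, end] intervals, matching the newReachabilityInterval(start, end) arithmetic visible in these hunks; the real method's error handling may differ.

    package main

    import (
        "errors"
        "fmt"
    )

    type interval struct{ start, end uint64 }

    func (i interval) size() uint64 { return i.end - i.start + 1 }

    // splitExact cuts i into consecutive sub-intervals with exactly the
    // given sizes. Sketch of the presumed semantics, not kaspad's code.
    func (i interval) splitExact(sizes []uint64) ([]interval, error) {
        var sum uint64
        for _, s := range sizes {
            sum += s
        }
        if sum != i.size() {
            return nil, errors.New("sizes must sum to the interval size")
        }
        result := make([]interval, len(sizes))
        start := i.start
        for j, s := range sizes {
            result[j] = interval{start, start + s - 1}
            start += s
        }
        return result, nil
    }

    func main() {
        parts, err := (interval{1, 10}).splitExact([]uint64{3, 7})
        if err != nil {
            panic(err)
        }
        fmt.Println(parts) // prints [{1 3} {4 10}]
    }
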
-func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTreeNode) ( - modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { _, childrenAfterNode, err := rtn.splitChildrenAroundChild(node) if err != nil { - return nil, err + return err } childrenAfterNodeSizes, childrenAfterNodeSubtreeSizeMaps, childrenAfterNodeSizesSum := @@ -626,25 +620,24 @@ func (rtn *reachabilityTreeNode) reindexIntervalsAfterNode(node *reachabilityTre newInterval := newReachabilityInterval(newIntervalStart, newIntervalStart+childrenAfterNodeSizesSum-1) intervals, err := newInterval.splitExact(childrenAfterNodeSizes) if err != nil { - return nil, err + return err } - return orderedTreeNodeSet(childrenAfterNode).propagateIntervals(intervals, childrenAfterNodeSubtreeSizeMaps) + return orderedTreeNodeSet(childrenAfterNode). + propagateIntervals(intervals, childrenAfterNodeSubtreeSizeMaps, modifiedNodes) } func (tns orderedTreeNodeSet) propagateIntervals(intervals []*reachabilityInterval, - subtreeSizeMaps []map[*reachabilityTreeNode]uint64) (modifiedTreeNodes, error) { + subtreeSizeMaps []map[*reachabilityTreeNode]uint64, modifiedNodes modifiedTreeNodes) error { - allModifiedTreeNodes := newModifiedTreeNodes() for i, node := range tns { node.interval = intervals[i] subtreeSizeMap := subtreeSizeMaps[i] - modifiedNodes, err := node.propagateInterval(subtreeSizeMap) + err := node.propagateInterval(subtreeSizeMap, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } - return allModifiedTreeNodes, nil + return nil } // isAncestorOf checks if this node is a reachability tree ancestor @@ -832,7 +825,8 @@ func (rt *reachabilityTree) addBlock(node *blockNode, selectedParentAnticone []* if err != nil { return err } - modifiedNodes, err := selectedParentTreeNode.addChild(newTreeNode, rt.reindexRoot) + modifiedNodes := newModifiedTreeNodes() + err = selectedParentTreeNode.addChild(newTreeNode, rt.reindexRoot, modifiedNodes) if err != nil { return err } @@ -861,7 +855,8 @@ func (rt *reachabilityTree) addBlock(node *blockNode, selectedParentAnticone []* // at this stage the virtual had not yet been updated. 
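
The isAncestorOf checks that appear throughout these hunks are the payoff of the interval bookkeeping: once every node's interval encloses the intervals of its entire subtree, ancestry reduces to a constant-time containment test. A hedged sketch follows; kaspad's exact inclusivity rules, such as whether a node counts as its own ancestor, are not visible in this diff.

    package main

    import "fmt"

    type interval struct{ start, end uint64 }

    type treeNode struct{ interval interval }

    // isAncestorOf reports whether rtn's interval encloses node's,
    // which coincides with tree ancestry when intervals are allocated
    // so that parents always contain their descendants. Illustrative
    // sketch only.
    func (rtn *treeNode) isAncestorOf(node *treeNode) bool {
        return rtn.interval.start <= node.interval.start &&
            node.interval.end <= rtn.interval.end
    }

    func main() {
        root := &treeNode{interval{1, 100}}
        child := &treeNode{interval{40, 60}}
        fmt.Println(root.isAncestorOf(child)) // prints true
        fmt.Println(child.isAncestorOf(root)) // prints false
    }
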
if node.blueScore > rt.dag.SelectedTipBlueScore() { updateStartTime := time.Now() - modifiedNodes, err := rt.updateReindexRoot(newTreeNode) + modifiedNodes := newModifiedTreeNodes() + err := rt.updateReindexRoot(newTreeNode, modifiedNodes) if err != nil { return err } @@ -941,48 +936,47 @@ func (rt *reachabilityTree) storeState(dbTx *dbaccess.TxContext) error { return nil } -func (rt *reachabilityTree) updateReindexRoot(newTreeNode *reachabilityTreeNode) (modifiedTreeNodes, error) { - allModifiedTreeNodes := newModifiedTreeNodes() +func (rt *reachabilityTree) updateReindexRoot(newTreeNode *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { nextReindexRoot := rt.reindexRoot for { - candidateReindexRoot, modifiedNodes, found, err := rt.maybeMoveReindexRoot(nextReindexRoot, newTreeNode) + candidateReindexRoot, found, err := rt.maybeMoveReindexRoot(nextReindexRoot, newTreeNode, modifiedNodes) if err != nil { - return nil, err + return err } if !found { break } - allModifiedTreeNodes.addAll(modifiedNodes) nextReindexRoot = candidateReindexRoot } rt.reindexRoot = nextReindexRoot - return allModifiedTreeNodes, nil + return nil } func (rt *reachabilityTree) maybeMoveReindexRoot( - reindexRoot *reachabilityTreeNode, newTreeNode *reachabilityTreeNode) ( - newReindexRoot *reachabilityTreeNode, modifiedNodes modifiedTreeNodes, found bool, err error) { + reindexRoot *reachabilityTreeNode, newTreeNode *reachabilityTreeNode, modifiedNodes modifiedTreeNodes) ( + newReindexRoot *reachabilityTreeNode, found bool, err error) { if !reindexRoot.isAncestorOf(newTreeNode) { commonAncestor := newTreeNode.findCommonAncestorWithReindexRoot(reindexRoot) - return commonAncestor, nil, true, nil + return commonAncestor, true, nil } reindexRootChosenChild, err := reindexRoot.findAncestorAmongChildren(newTreeNode) if err != nil { - return nil, nil, false, err + return nil, false, err } if newTreeNode.blockNode.blueScore-reindexRootChosenChild.blockNode.blueScore < reachabilityReindexWindow { - return nil, nil, false, nil + return nil, false, nil } - modifiedNodes, err = rt.concentrateIntervalAroundReindexRootChosenChild(reindexRoot, reindexRootChosenChild) + err = rt.concentrateIntervalAroundReindexRootChosenChild(reindexRoot, reindexRootChosenChild, modifiedNodes) if err != nil { - return nil, nil, false, err + return nil, false, err } - return reindexRootChosenChild, modifiedNodes, true, nil + return reindexRootChosenChild, true, nil } // findAncestorAmongChildren finds the reachability tree child @@ -997,39 +991,34 @@ func (rtn *reachabilityTreeNode) findAncestorAmongChildren(node *reachabilityTre } func (rt *reachabilityTree) concentrateIntervalAroundReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) error { reindexRootChildNodesBeforeChosen, reindexRootChildNodesAfterChosen, err := reindexRoot.splitChildrenAroundChild(reindexRootChosenChild) if err != nil { - return nil, err + return err } - reindexRootChildNodesBeforeChosenSizesSum, modifiedNodesBeforeChosen, err := - rt.tightenIntervalsBeforeReindexRootChosenChild(reindexRoot, reindexRootChildNodesBeforeChosen) + reindexRootChildNodesBeforeChosenSizesSum, err := + rt.tightenIntervalsBeforeReindexRootChosenChild(reindexRoot, reindexRootChildNodesBeforeChosen, modifiedNodes) if err != nil { - return 
nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesBeforeChosen) - reindexRootChildNodesAfterChosenSizesSum, modifiedNodesAfterChosen, err := - rt.tightenIntervalsAfterReindexRootChosenChild(reindexRoot, reindexRootChildNodesAfterChosen) + reindexRootChildNodesAfterChosenSizesSum, err := + rt.tightenIntervalsAfterReindexRootChosenChild(reindexRoot, reindexRootChildNodesAfterChosen, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesAfterChosen) - modifiedNodesForReindexRootExpansion, err := rt.expandIntervalInReindexRootChosenChild( - reindexRoot, reindexRootChosenChild, reindexRootChildNodesBeforeChosenSizesSum, reindexRootChildNodesAfterChosenSizesSum) + err = rt.expandIntervalInReindexRootChosenChild(reindexRoot, reindexRootChosenChild, + reindexRootChildNodesBeforeChosenSizesSum, reindexRootChildNodesAfterChosenSizesSum, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodesForReindexRootExpansion) - return allModifiedTreeNodes, nil + return nil } // splitChildrenAroundChild splits `rtn` into two slices: the nodes that are before @@ -1046,8 +1035,8 @@ func (rtn *reachabilityTreeNode) splitChildrenAroundChild(child *reachabilityTre } func (rt *reachabilityTree) tightenIntervalsBeforeReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChildNodesBeforeChosen []*reachabilityTreeNode) ( - reindexRootChildNodesBeforeChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + reindexRoot *reachabilityTreeNode, reindexRootChildNodesBeforeChosen []*reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) (reindexRootChildNodesBeforeChosenSizesSum uint64, err error) { reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps, reindexRootChildNodesBeforeChosenSizesSum := calcReachabilityTreeNodeSizes(reindexRootChildNodesBeforeChosen) @@ -1057,17 +1046,17 @@ func (rt *reachabilityTree) tightenIntervalsBeforeReindexRootChosenChild( reindexRoot.interval.start+reachabilityReindexSlack+reindexRootChildNodesBeforeChosenSizesSum-1, ) - modifiedNodes, err = rt.propagateChildIntervals(intervalBeforeReindexRootStart, reindexRootChildNodesBeforeChosen, - reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps) + err = rt.propagateChildIntervals(intervalBeforeReindexRootStart, reindexRootChildNodesBeforeChosen, + reindexRootChildNodesBeforeChosenSizes, reindexRootChildNodesBeforeChosenSubtreeSizeMaps, modifiedNodes) if err != nil { - return 0, nil, err + return 0, err } - return reindexRootChildNodesBeforeChosenSizesSum, modifiedNodes, nil + return reindexRootChildNodesBeforeChosenSizesSum, nil } func (rt *reachabilityTree) tightenIntervalsAfterReindexRootChosenChild( - reindexRoot *reachabilityTreeNode, reindexRootChildNodesAfterChosen []*reachabilityTreeNode) ( - reindexRootChildNodesAfterChosenSizesSum uint64, modifiedNodes modifiedTreeNodes, err error) { + reindexRoot *reachabilityTreeNode, reindexRootChildNodesAfterChosen []*reachabilityTreeNode, + modifiedNodes modifiedTreeNodes) (reindexRootChildNodesAfterChosenSizesSum uint64, err error) { reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps, reindexRootChildNodesAfterChosenSizesSum := calcReachabilityTreeNodeSizes(reindexRootChildNodesAfterChosen) @@ -1077,19 +1066,17 @@ func (rt *reachabilityTree) tightenIntervalsAfterReindexRootChosenChild( reindexRoot.interval.end-reachabilityReindexSlack-1, ) - 
modifiedNodes, err = rt.propagateChildIntervals(intervalAfterReindexRootEnd, reindexRootChildNodesAfterChosen, - reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps) + err = rt.propagateChildIntervals(intervalAfterReindexRootEnd, reindexRootChildNodesAfterChosen, + reindexRootChildNodesAfterChosenSizes, reindexRootChildNodesAfterChosenSubtreeSizeMaps, modifiedNodes) if err != nil { - return 0, nil, err + return 0, err } - return reindexRootChildNodesAfterChosenSizesSum, modifiedNodes, nil + return reindexRootChildNodesAfterChosenSizesSum, nil } func (rt *reachabilityTree) expandIntervalInReindexRootChosenChild(reindexRoot *reachabilityTreeNode, reindexRootChosenChild *reachabilityTreeNode, reindexRootChildNodesBeforeChosenSizesSum uint64, - reindexRootChildNodesAfterChosenSizesSum uint64) (modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + reindexRootChildNodesAfterChosenSizesSum uint64, modifiedNodes modifiedTreeNodes) error { newReindexRootChildInterval := newReachabilityInterval( reindexRoot.interval.start+reindexRootChildNodesBeforeChosenSizesSum+reachabilityReindexSlack, @@ -1110,22 +1097,21 @@ func (rt *reachabilityTree) expandIntervalInReindexRootChosenChild(reindexRoot * newReindexRootChildInterval.start+reachabilityReindexSlack, newReindexRootChildInterval.end-reachabilityReindexSlack, ) - modifiedNodes, err := reindexRootChosenChild.countSubtreesAndPropagateInterval() + err := reindexRootChosenChild.countSubtreesAndPropagateInterval(modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } reindexRootChosenChild.interval = newReindexRootChildInterval - allModifiedTreeNodes[reindexRootChosenChild] = struct{}{} - return allModifiedTreeNodes, nil + modifiedNodes[reindexRootChosenChild] = struct{}{} + return nil } -func (rtn *reachabilityTreeNode) countSubtreesAndPropagateInterval() (modifiedTreeNodes, error) { +func (rtn *reachabilityTreeNode) countSubtreesAndPropagateInterval(modifiedNodes modifiedTreeNodes) error { subtreeSizeMap := make(map[*reachabilityTreeNode]uint64) rtn.countSubtrees(subtreeSizeMap) - return rtn.propagateInterval(subtreeSizeMap) + return rtn.propagateInterval(subtreeSizeMap, modifiedNodes) } func calcReachabilityTreeNodeSizes(treeNodes []*reachabilityTreeNode) ( @@ -1146,14 +1132,12 @@ func calcReachabilityTreeNodeSizes(treeNodes []*reachabilityTreeNode) ( } func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterval, - childNodes []*reachabilityTreeNode, sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64) ( - modifiedTreeNodes, error) { - - allModifiedTreeNodes := newModifiedTreeNodes() + childNodes []*reachabilityTreeNode, sizes []uint64, subtreeSizeMaps []map[*reachabilityTreeNode]uint64, + modifiedNodes modifiedTreeNodes) error { childIntervalSizes, err := interval.splitExact(sizes) if err != nil { - return nil, err + return err } for i, child := range childNodes { @@ -1161,14 +1145,13 @@ func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterv child.interval = childInterval childSubtreeSizeMap := subtreeSizeMaps[i] - modifiedNodes, err := child.propagateInterval(childSubtreeSizeMap) + err := child.propagateInterval(childSubtreeSizeMap, modifiedNodes) if err != nil { - return nil, err + return err } - allModifiedTreeNodes.addAll(modifiedNodes) } - return allModifiedTreeNodes, nil + return nil } // isInPast returns true if `this` is in the past (exclusive) of `other` diff --git 
a/blockdag/reachability_test.go b/blockdag/reachability_test.go index 47ad98bcc..07e03791a 100644 --- a/blockdag/reachability_test.go +++ b/blockdag/reachability_test.go @@ -19,7 +19,8 @@ func TestAddChild(t *testing.T) { currentTip := root for i := 0; i < 6; i++ { node := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(node, root) + modifiedNodes := newModifiedTreeNodes() + err := currentTip.addChild(node, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -36,7 +37,8 @@ func TestAddChild(t *testing.T) { // Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128) lastChild := newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := currentTip.addChild(lastChild, root) + modifiedNodes := newModifiedTreeNodes() + err := currentTip.addChild(lastChild, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -80,7 +82,8 @@ func TestAddChild(t *testing.T) { childNodes := make([]*reachabilityTreeNode, 6) for i := 0; i < len(childNodes); i++ { childNodes[i] = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err := root.addChild(childNodes[i], root) + modifiedNodes := newModifiedTreeNodes() + err := root.addChild(childNodes[i], root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -95,7 +98,8 @@ func TestAddChild(t *testing.T) { // Add another node to the root to trigger a reindex (100 < 2^7=128) lastChild = newReachabilityTreeNode(&blockNode{}) - modifiedNodes, err = root.addChild(lastChild, root) + modifiedNodes = newModifiedTreeNodes() + err = root.addChild(lastChild, root, modifiedNodes) if err != nil { t.Fatalf("TestAddChild: addChild failed: %s", err) } @@ -135,7 +139,7 @@ func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { descendants := make([]*reachabilityTreeNode, numberOfDescendants) for i := 0; i < numberOfDescendants; i++ { node := newReachabilityTreeNode(&blockNode{}) - _, err := currentTip.addChild(node, root) + err := currentTip.addChild(node, root, newModifiedTreeNodes()) if err != nil { t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: addChild failed: %s", err) } @@ -682,7 +686,7 @@ func TestReindexIntervalErrors(t *testing.T) { currentTreeNode := treeNode for i := 0; i < 100; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err = currentTreeNode.addChild(childTreeNode, treeNode) + err = currentTreeNode.addChild(childTreeNode, treeNode, newModifiedTreeNodes()) if err != nil { break } @@ -719,7 +723,7 @@ func BenchmarkReindexInterval(b *testing.B) { currentTreeNode := root for i := 0; i < subTreeSize; i++ { childTreeNode := newReachabilityTreeNode(&blockNode{}) - _, err := currentTreeNode.addChild(childTreeNode, root) + err := currentTreeNode.addChild(childTreeNode, root, newModifiedTreeNodes()) if err != nil { b.Fatalf("addChild: %s", err) } @@ -732,7 +736,7 @@ func BenchmarkReindexInterval(b *testing.B) { // node should lead to a reindex from root. fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{}) b.StartTimer() - _, err := currentTreeNode.addChild(fullReindexTriggeringNode, root) + err := currentTreeNode.addChild(fullReindexTriggeringNode, root, newModifiedTreeNodes()) b.StopTimer() if err != nil { b.Fatalf("addChild: %s", err) @@ -818,6 +822,37 @@ func TestIsInPast(t *testing.T) { } } +func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) { + // Create a new database and DAG instance to run tests against. 
+ dag, teardownFunc, err := DAGSetup("TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot", + true, Config{DAGParams: &dagconfig.SimnetParams}) + if err != nil { + t.Fatalf("Failed to setup DAG instance: %v", err) + } + defer teardownFunc() + + // Set the reindex window to a low number to make this test run fast + originalReachabilityReindexWindow := reachabilityReindexWindow + reachabilityReindexWindow = 10 + defer func() { + reachabilityReindexWindow = originalReachabilityReindexWindow + }() + + // Add a block on top of the genesis block + chainRootBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) + + // Add chain of reachabilityReindexWindow blocks above chainRootBlock. + // This should move the reindex root + chainRootBlockTipHash := chainRootBlock.BlockHash() + for i := uint64(0); i < reachabilityReindexWindow; i++ { + chainBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{chainRootBlockTipHash}, nil) + chainRootBlockTipHash = chainBlock.BlockHash() + } + + // Add another block over genesis + PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil) +} + func TestUpdateReindexRoot(t *testing.T) { // Create a new database and DAG instance to run tests against. dag, teardownFunc, err := DAGSetup("TestUpdateReindexRoot", true, Config{ From 8c90344f280b562d0ca9a6928af599e14b3970bd Mon Sep 17 00:00:00 2001 From: Svarog Date: Wed, 1 Jul 2020 09:21:42 +0300 Subject: [PATCH 76/77] [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score (#780) * [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score * [NOD-1103] Fix genesis block bytes --- dagconfig/genesis.go | 22 +++++++++++----------- dagconfig/genesis_test.go | 18 +++++++++--------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/dagconfig/genesis.go b/dagconfig/genesis.go index 4da34a5bd..286023c9d 100644 --- a/dagconfig/genesis.go +++ b/dagconfig/genesis.go @@ -209,7 +209,7 @@ var simnetGenesisBlock = wire.MsgBlock{ var testnetGenesisTxOuts = []*wire.TxOut{} var testnetGenesisTxPayload = []byte{ - 0x00, // Blue score + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score 0x01, // Varint 0x00, // OP-FALSE 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74, // kaspa-testnet @@ -221,19 +221,19 @@ var testnetGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, testne // testnetGenesisHash is the hash of the first block in the block DAG for the test // network (genesis block). var testnetGenesisHash = daghash.Hash{ - 0x16, 0xf0, 0xfa, 0xaf, 0xb5, 0xe9, 0x99, 0x0f, - 0x4a, 0x32, 0xbc, 0xa3, 0x08, 0x9c, 0x65, 0xe4, - 0xdd, 0x0d, 0x14, 0x2d, 0x8c, 0x2b, 0x4f, 0x32, - 0xa3, 0xcf, 0x3b, 0x85, 0x77, 0x24, 0x00, 0x00, + 0xFC, 0x21, 0x64, 0x1A, 0xB5, 0x59, 0x61, 0x8E, + 0xF3, 0x9A, 0x95, 0xF1, 0xDA, 0x07, 0x79, 0xBD, + 0x11, 0x2F, 0x90, 0xFC, 0x8B, 0x33, 0x14, 0x8A, + 0x90, 0x6B, 0x76, 0x08, 0x4B, 0x52, 0x00, 0x00, } // testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block // for testnet. 
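
The payload change above widens the blue-score prefix from one byte to a full uint64; with a genesis blue score of zero all eight bytes are 0x00. The sketch below shows how a non-zero score would be laid out, assuming little-endian fixed-width integers (kaspad's usual serialization convention, though the byte order is this sketch's assumption, not something the diff states); genesisPayloadPrefix is a hypothetical helper, not a kaspad function.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // genesisPayloadPrefix builds the leading bytes of a coinbase-style
    // payload: an 8-byte blue score, then a varint script length, then
    // the script itself. Hypothetical helper for illustration only;
    // little-endian byte order is assumed.
    func genesisPayloadPrefix(blueScore uint64, script []byte) []byte {
        payload := make([]byte, 8, 9+len(script))
        binary.LittleEndian.PutUint64(payload, blueScore)
        payload = append(payload, byte(len(script))) // varint, single-byte case
        payload = append(payload, script...)
        return payload
    }

    func main() {
        // Blue score 0 with an OP-FALSE script reproduces the
        // 0x00 x 8, 0x01, 0x00 prefix of testnetGenesisTxPayload.
        fmt.Printf("% x\n", genesisPayloadPrefix(0, []byte{0x00}))
    }
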
var testnetGenesisMerkleRoot = daghash.Hash{ - 0x1a, 0xbb, 0x24, 0x6b, 0x72, 0x29, 0xb3, 0xd5, - 0x5f, 0x9c, 0x2b, 0xf3, 0xd1, 0x30, 0x67, 0xd5, - 0xbe, 0x52, 0x5c, 0xde, 0x67, 0x57, 0xbe, 0xb7, - 0x7e, 0x7f, 0x9f, 0x05, 0xf8, 0xd5, 0xec, 0x8c, + 0xA0, 0xA1, 0x3D, 0xFD, 0x86, 0x41, 0x35, 0xC8, + 0xBD, 0xBB, 0xE6, 0x37, 0x35, 0xBB, 0x4C, 0x51, + 0x11, 0x7B, 0x26, 0x90, 0x15, 0x64, 0x0F, 0x42, + 0x6D, 0x2B, 0x6F, 0x37, 0x4D, 0xC1, 0xA9, 0x72, } // testnetGenesisBlock defines the genesis block of the block DAG which serves as the @@ -245,9 +245,9 @@ var testnetGenesisBlock = wire.MsgBlock{ HashMerkleRoot: &testnetGenesisMerkleRoot, AcceptedIDMerkleRoot: &daghash.ZeroHash, UTXOCommitment: &daghash.ZeroHash, - Timestamp: time.Unix(0x5ede4dbc, 0), + Timestamp: time.Unix(0x5efc2128, 0), Bits: 0x1e7fffff, - Nonce: 0x6f12, + Nonce: 0x1124, }, Transactions: []*wire.MsgTx{testnetGenesisCoinbaseTx}, } diff --git a/dagconfig/genesis_test.go b/dagconfig/genesis_test.go index e1eaa8621..f5cd2b613 100644 --- a/dagconfig/genesis_test.go +++ b/dagconfig/genesis_test.go @@ -189,20 +189,20 @@ var regtestGenesisBlockBytes = []byte{ // testnetGenesisBlockBytes are the wire encoded bytes for the genesis block of // the test network as of protocol version 1. var testnetGenesisBlockBytes = []byte{ - 0x00, 0x00, 0x00, 0x10, 0x00, 0x1a, 0xbb, 0x24, 0x6b, 0x72, 0x29, 0xb3, 0xd5, 0x5f, 0x9c, 0x2b, - 0xf3, 0xd1, 0x30, 0x67, 0xd5, 0xbe, 0x52, 0x5c, 0xde, 0x67, 0x57, 0xbe, 0xb7, 0x7e, 0x7f, 0x9f, - 0x05, 0xf8, 0xd5, 0xec, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, 0x00, 0xa0, 0xa1, 0x3d, 0xfd, 0x86, 0x41, 0x35, 0xc8, 0xbd, 0xbb, 0xe6, + 0x37, 0x35, 0xbb, 0x4c, 0x51, 0x11, 0x7b, 0x26, 0x90, 0x15, 0x64, 0x0f, 0x42, 0x6d, 0x2b, 0x6f, + 0x37, 0x4d, 0xc1, 0xa9, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x4d, 0xde, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x1e, 0x12, 0x6f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x21, 0xfc, 0x5e, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x1e, 0x24, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x6c, 0x53, 0x02, 0xe8, 0xa9, 0xb9, 0x5d, 0x7c, 0x58, 0x91, 0x9c, 0x87, - 0xb9, 0x1d, 0x68, 0x5a, 0x7e, 0x07, 0xbe, 0xd4, 0xb2, 0x94, 0x6d, 0xdf, 0x5a, 0x97, 0x9f, 0xec, - 0x46, 0xdf, 0x87, 0x34, 0x10, 0x00, 0x01, 0x00, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x2d, 0x74, 0x65, - 0x73, 0x74, 0x6e, 0x65, 0x74, + 0x00, 0x00, 0x00, 0x00, 0xf5, 0x41, 0x4c, 0xf4, 0xa8, 0xa2, 0x8c, 0x47, 0x9d, 0xb5, 0x75, 0x5e, + 0x0f, 0x38, 0xd3, 0x27, 0x82, 0xc6, 0xd1, 0x89, 0xc1, 0x60, 0x49, 0xd9, 0x99, 0xc6, 0x2e, 0xbf, + 0x4b, 0x5a, 0x3a, 0xcf, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x6b, + 0x61, 0x73, 0x70, 0x61, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x6e, 0x65, 0x74, } // simnetGenesisBlockBytes are the wire encoded bytes for the genesis block of From 
9893b7396caba1b80e0c69a7b23cd16b39550d48 Mon Sep 17 00:00:00 2001 From: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com> Date: Wed, 1 Jul 2020 13:43:51 +0300 Subject: [PATCH 77/77] [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery (#781) * [NOD-1105] Don't use a database transaction when recovering acceptance index. * Revert "[NOD-1105] Don't use a database transaction when recovering acceptance index." This reverts commit da550f8e * [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery. --- blockdag/indexers/acceptanceindex.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/blockdag/indexers/acceptanceindex.go b/blockdag/indexers/acceptanceindex.go index 6db489437..95aca002a 100644 --- a/blockdag/indexers/acceptanceindex.go +++ b/blockdag/indexers/acceptanceindex.go @@ -59,13 +59,13 @@ func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error { // // This is part of the Indexer interface. func (idx *AcceptanceIndex) recover() error { - dbTx, err := dbaccess.NewTx() - if err != nil { - return err - } - defer dbTx.RollbackUnlessClosed() + return idx.dag.ForEachHash(func(hash daghash.Hash) error { + dbTx, err := dbaccess.NewTx() + if err != nil { + return err + } + defer dbTx.RollbackUnlessClosed() - err = idx.dag.ForEachHash(func(hash daghash.Hash) error { exists, err := dbaccess.HasAcceptanceData(dbTx, &hash) if err != nil { return err @@ -77,13 +77,13 @@ func (idx *AcceptanceIndex) recover() error { if err != nil { return err } - return idx.ConnectBlock(dbTx, &hash, txAcceptanceData) - }) - if err != nil { - return err - } + err = idx.ConnectBlock(dbTx, &hash, txAcceptanceData) + if err != nil { + return err + } - return dbTx.Commit() + return dbTx.Commit() + }) } // ConnectBlock is invoked by the index manager when a new block has been