Compare commits

...

24 Commits

Author SHA1 Message Date
Ori Newman
11a9848540 Add log for pruning point anticone size 2022-03-26 14:08:45 +03:00
Michael Sutton
9fa08442cf Minor log fixes (#1983) 2022-03-21 11:45:52 +02:00
Michael Sutton
0dd50394ec Fix a bug in the new p2p v5 IBD chain negotiation (#1981)
* Fix a bug in the case where syncer chain is fully known to syncee

* Extract chain negotiation to a separate method

* Bump version and update the changelog

* Add zoom-in progress validation and some debug logs

* Improved error explanation

* go fmt

* Validate zoom-in progress through a total count
2022-03-20 18:06:55 +02:00
Michael Sutton
ac8d4e1341 Update changelog with v0.11.13 changes (#1980) 2022-03-16 10:48:11 +02:00
Michael Sutton
2488fbde78 Apply avoiding IBD patch10 logic to p2p v4 (#1979)
* a patch for fixing p2p v4 IBD issues for all side-chains

* Perform side-chain check earlier to avoid IBD start

* A few comments explaining the IBD patch
2022-03-15 19:16:25 +02:00
Michael Sutton
2ab8065142 Improve output of non-critical protocol errors to avoid user panic (#1978)
* Improve output of non-critical protocol errors to avoid user panic

* Add log messages at the end of IBD with headers proof

* Found a case where this was falsely triggered due to the wrong equality test
2022-03-15 17:23:29 +02:00
Michael Sutton
25410b86ae Make sure there are no negative numbers in the progress report (#1977) 2022-03-15 11:58:15 +02:00
Michael Sutton
4e44dd8510 Various P2P V5 IBD fixes (#1976)
* The first message is expected to contain headers and not a "done" message (+comment and error text fixes)

* Dequeue w/o timeout during pp anticone batch processing

* Add a verification step for catching possible new IBD errors

* Fetch missing bodies for both the syncer selected tip past and the relay block past

* Make sure the syncer is behaving correctly to avoid out of index errors

* Make sure progress reporter does not exceed 100%

* No orphan roots, so no need to queue the empty list

* Add a log to report utxo fetch failure with err message

* A duplicate block should not appear as a warning

* typo
2022-03-14 12:21:32 +02:00
Svarog
1e56a22b32 Ignore not found errors from tp.transactionsOrderedByFeeRate.Remove. (#1974)
* Fix error message referencing wrong function name

* Ignore not found errors from tp.transactionsOrderedByFeeRate.Remove.

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2022-03-13 18:01:13 +02:00
Ori Newman
7a95f0c7a4 Use nil suggestedLowHash if selected parent pruning point is not in the future of the current one (#1972)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2022-03-13 17:22:03 +02:00
Michael Sutton
c81506220b Send pruning point anticone in batches (#1973)
* add p2p v5 which is currently identical to v4

* set all internal imports to v5

* set default version to 5

* Send pruning point and its anticone in batches

* go lint

* Fix json format

* Use DequeueWithTimeout

* Assert that batch size < route capacity

* oops, this is a flow handler, by definition it needs to be w/o a timeout

* here however, a timeout is required

* Keep IDs of prev messages unmodified

* previous merge operation accidentally erased an important part of this pr

* Extend timeout of simple sync

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-03-13 16:31:34 +02:00
Michael Sutton
e5598c15a7 Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969)
* add p2p v5 which is currently identical to v4

* set all internal imports to v5

* wip

* set default version to 5

* protobuf gen for new ibd chain locator

* wire for new ibd chain locator types

* new ibd shared block algo -- only basic test passing

* address the case where pruning points disagree, now both IBD tests pass

* protobuf gen for new past diff request message

* wire for new request past diff message

* handle and flow for new request past diff message - logic unimplemented yet

* implement ibd sync past diff of relay and selected tip

* go fmt

* remove unused methods

* missed one err check

* addressing simple comments

* apply the traversal limit logic and sort headers

* rename pastdiff -> anticone

* apply Don't relay blocks in virtual anticone #1970 to v5

* go fmt

* Fixed minor comments

* Limit the number of chain negotiation restarts
2022-03-13 11:27:50 +02:00
Svarog
433af5e0fe Make findTransactionIndex return wasFound explicitly + fix crash caused by invalid handling of not found transaction (#1971)
* Make findTransactionIndex return wasFound explicitly + fix crash caused by invalid handling of not found transaction

* Add comment on findTransactionIndex
2022-03-12 11:07:10 +02:00
Ori Newman
b7be807167 Don't relay blocks in virtual anticone (#1970) 2022-03-11 13:24:45 +02:00
Ori Newman
e687ceeae7 Add version to block template (#1967) 2022-03-11 08:56:36 +02:00
Ori Newman
04e35321aa Bump to v0.11.13 (#1968) 2022-03-11 08:33:53 +02:00
Ori Newman
061e65be93 Fix argument order for IsAncestorOf in boundedMergeBreakingParents (#1966) 2022-03-09 21:11:00 +02:00
Ori Newman
190e725dd0 Optimize expected header pruning point (#1962)
* Use the correct heuristic to avoid checking for next pruning point movement when not needed
2022-03-07 00:16:29 +02:00
Ori Newman
6449b03034 Ignore transaction invs on IBD (#1960)
* Ignore transaction invs on IBD

* Add IsIBDRunning mock to TestHandleRelayedTransactionsNotFound

Co-authored-by: Ori Newman <>
2022-02-26 22:20:08 +02:00
Ori Newman
9f02a24e8b Add merge set and IsChainBlock to the RPC (#1961)
* Add merge set and IsChainBlock to the RPC

* Fix BlockInfo.Clone()
2022-02-25 16:22:00 +02:00
Isaac Cook
9b23bbcdb5 kaspactl: string slice deser for GetUtxosByAddresses (#1955)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-02-24 00:40:01 +02:00
stasatdaglabs
b30f7309a2 Implement a parse sub command in the wallet (#1953)
* Add boilerplate for the `parse` sub command.

* Deserialize the given transaction hex.

* Implement the rest of the wallet parse command.

* Hide transaction inputs behind a `verbose` flag.

* Indicate that we aren't able to extract an address out of a nonstandard transaction.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-02-20 22:12:23 +02:00
Ori Newman
1c18a49992 Add cache to block window (#1948)
* Add cache to block window

* Copy the window heap slice with the right capacity

* Use WindowHeapSliceStore

* Use the selected parent window as a basis (and some comments and variable renames)

* Clone slice on newSizedUpHeapFromSlice

* Rename isNotFoundError->currentIsNonTrustedBlock

* Increase windowHeapSliceStore cache size to 2000 and some cosmetic changes
2022-02-20 16:52:36 +02:00
stasatdaglabs
28d0f1ea2e Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Make MaxBlockLevel a DAG params instead of a constant.

* Change the testnet network name to 9.

* Fix TestBlockWindow.

* Set MaxBlockLevels for non-mainnet networks to 250.

* Revert "Fix TestBlockWindow."

This reverts commit 30a7892f53.

* Fix TestPruning.
2022-02-20 13:43:42 +02:00
116 changed files with 7156 additions and 1724 deletions

View File

@@ -69,6 +69,10 @@ const (
CmdReady
CmdTrustedData
CmdBlockWithTrustedDataV4
CmdRequestNextPruningPointAndItsAnticoneBlocks
CmdRequestIBDChainBlockLocator
CmdIBDChainBlockLocator
CmdRequestAnticone
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -195,6 +199,10 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdReady: "Ready",
CmdTrustedData: "TrustedData",
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator",
CmdIBDChainBlockLocator: "IBDChainBlockLocator",
CmdRequestAnticone: "RequestAnticone",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation

View File

@@ -0,0 +1,27 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgIBDChainBlockLocator struct {
baseMessage
BlockLocatorHashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
return CmdIBDChainBlockLocator
}
// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
return &MsgIBDChainBlockLocator{
BlockLocatorHashes: locatorHashes,
}
}
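
For orientation, a minimal responder-side sketch (not part of the diff; the consensus and outgoingRoute parameters are stand-ins, and the imports are the appmessage, externalapi and router packages already referenced above). It builds the locator from the node's own headers-selected chain and enqueues it, which is what the HandleRequestIBDChainBlockLocator flow further below does:

// Sketch only: reply to a locator request with this node's selected-chain locator.
func sendSelectedChainLocator(consensus externalapi.Consensus, outgoingRoute *router.Route) error {
	locator, err := consensus.CreateFullHeadersSelectedChainBlockLocator()
	if err != nil {
		return err
	}
	return outgoingRoute.Enqueue(appmessage.NewMsgIBDChainBlockLocator(locator))
}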

View File

@@ -0,0 +1,33 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestAnticone implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
type MsgRequestAnticone struct {
baseMessage
BlockHash *externalapi.DomainHash
ContextHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAnticone) Command() MessageCommand {
return CmdRequestAnticone
}
// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
return &MsgRequestAnticone{
BlockHash: blockHash,
ContextHash: contextHash,
}
}
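
A minimal requester-side sketch (not part of the diff; outgoingRoute, blockHash and contextHash are stand-ins). The syncee asks the peer for the headers in past(contextHash) \cap anticone(blockHash), i.e. the part of the peer's known past that is not already covered by the relay block's past:

// Sketch only: request the anticone set from the peer.
func requestAnticone(outgoingRoute *router.Route, blockHash, contextHash *externalapi.DomainHash) error {
	return outgoingRoute.Enqueue(appmessage.NewMsgRequestAnticone(blockHash, contextHash))
}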

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
type MsgRequestIBDChainBlockLocator struct {
baseMessage
HighHash *externalapi.DomainHash
LowHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
return CmdRequestIBDChainBlockLocator
}
// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
return &MsgRequestIBDChainBlockLocator{
HighHash: highHash,
LowHash: lowHash,
}
}
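
A sketch of one negotiation round from the requesting side (not part of the diff; the route and hash parameters are stand-ins, and common/protocolerrors are the packages already used elsewhere in this change). The syncee repeats such rounds, zooming the [lowHash, highHash] range in on the highest shared chain block; an empty locator in the reply signals that the peer's selected chain changed mid-negotiation, in which case the negotiation is restarted a bounded number of times:

// Sketch only: send a locator request and wait for the matching reply.
func requestChainBlockLocator(incomingRoute, outgoingRoute *router.Route,
	highHash, lowHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {

	err := outgoingRoute.Enqueue(appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash))
	if err != nil {
		return nil, err
	}
	message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}
	locatorMessage, ok := message.(*appmessage.MsgIBDChainBlockLocator)
	if !ok {
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
	}
	// An empty BlockLocatorHashes slice means the peer's chain moved; the caller restarts.
	return locatorMessage.BlockLocatorHashes, nil
}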

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
// more blocks from the pruning anticone.
//
// This message has no payload.
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
return CmdRequestNextPruningPointAndItsAnticoneBlocks
}
// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
}
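
Since this message has no payload, the syncee's side is a one-line acknowledgment sent after it finishes processing each batch, which lets the syncer's batching loop (see HandlePruningPointAndItsAnticoneRequests below) continue. A sketch, with outgoingRoute as a stand-in:

// Sketch only: ask the syncer for the next batch of pruning-anticone blocks.
func requestNextPruningPointAnticoneBatch(outgoingRoute *router.Route) error {
	return outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
}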

View File

@@ -92,11 +92,14 @@ type RPCBlockLevelParents struct {
// RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct {
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
MergeSetBluesHashes []string
MergeSetRedsHashes []string
IsChainBlock bool
}

View File

@@ -150,7 +150,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
return false
}
f.ibdPeer = ibdPeer
log.Infof("IBD started")
log.Infof("IBD started with peer %s", ibdPeer)
return true
}

View File

@@ -2,6 +2,7 @@ package flowcontext
import (
"errors"
"strings"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -9,6 +10,11 @@ import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
)
var (
// ErrPingTimeout signifies that a ping operation timed out.
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
)
// HandleError handles an error from a flow,
// It sends the error to errChan if isStopping == 0 and increments isStopping
//
@@ -21,8 +27,15 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}
log.Errorf("error from %s: %s", flowName, err)
if errors.Is(err, ErrPingTimeout) {
// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
log.Errorf("error from %s: %s", flowName, err)
} else {
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
logFrame := strings.Repeat("=", 52)
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
flowName, logFrame, err, logFrame)
}
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -20,7 +20,7 @@ var (
// connected peer may support.
minAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(5)
)
type receiveVersionFlow struct {

View File

@@ -0,0 +1,16 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
if ibdBatchSize > router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than or equal to router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}

View File

@@ -33,7 +33,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
return protocolerrors.Errorf(true, "block %s not found (v4)", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {

View File

@@ -10,6 +10,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
@@ -131,6 +132,10 @@ func (flow *handleRelayInvsFlow) start() error {
}
log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
@@ -153,11 +158,33 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}
log.Debugf("Relaying block %s", inv.Hash)
err = flow.relayBlock(block)
oldVirtualParents := hashset.New()
for _, parent := range oldVirtualInfo.ParentHashes {
oldVirtualParents.Add(parent)
}
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}
block, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
@@ -258,7 +285,10 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil, nil
}
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, virtualChangeSet, nil
@@ -369,6 +399,10 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}
if len(orphanRoots) == 0 {
// In some rare cases we get here when there are no orphan roots already
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))

View File

@@ -10,7 +10,9 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
const ibdBatchSize = router.DefaultMaxMessages
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 100
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {

View File

@@ -13,7 +13,9 @@ import (
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
"time"
)
@@ -64,6 +66,34 @@ func (flow *handleIBDFlow) start() error {
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
highHash := consensushashing.BlockHash(block)
// Temp code to avoid IBD from lagging nodes publishing their side-chain. This patch
// is applied only to p2p v4 since the implemented IBD negotiation has quadratic complexity in this worst-case.
// See IBD logic of p2p v5 for further details.
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err == nil {
virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
if err == nil {
// We first check that DAA score of the relay block is at distance of more than DAA window size.
// This indicates a side-chain which is not in the future of any block in the current virtual DAA window.
if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
// We then find the 'unit' of current virtual difficulty. We check if the relay block is at least
// at distance of 180 such units. This signals another condition for a pow-weak side-chain.
virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
var virtualSub, difficultyMul big.Int
if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
log.Criticalf("Avoiding IBD triggered by relay %s with %d DAA score diff and lower blue work (%d, %d)",
highHash,
virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
return nil
}
}
}
}
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
@@ -76,15 +106,14 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
flow.logIBDFinished(isFinishedSuccessfully)
}()
highHash := consensushashing.BlockHash(block)
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Debugf("Syncing blocks up to %s", highHash)
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
log.Infof("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Infof("Syncing blocks up to %s", highHash)
log.Infof("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
log.Infof("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {

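The patch above skips IBD only when two conditions hold together: the relay block's DAA score is more than a full DAA window (2641) below the virtual selected parent's, and its blue work trails the virtual's by more than 180 units of the current virtual difficulty. Restated as a standalone predicate (a sketch, not part of the diff; the function and parameter names are hypothetical, and the imports are externalapi, difficulty and math/big as above):

// Sketch only: true when the relay block looks like a pow-weak side-chain tip,
// i.e. when triggering IBD for it should be avoided. Mirrors the inline check
// in runIBDIfNotRunning above.
func looksLikeWeakSideChain(virtualSelectedParentHeader, relayHeader externalapi.BlockHeader) bool {
	const daaWindowSize = 2641
	if virtualSelectedParentHeader.DAAScore() <= relayHeader.DAAScore()+daaWindowSize {
		return false
	}
	virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
	var blueWorkGap, threshold big.Int
	blueWorkGap.Sub(virtualSelectedParentHeader.BlueWork(), relayHeader.BlueWork())
	threshold.Mul(virtualDifficulty, big.NewInt(180))
	return threshold.Cmp(&blueWorkGap) < 0
}
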
View File

@@ -10,6 +10,10 @@ type ibdProgressReporter struct {
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero or negative diff
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
@@ -23,7 +27,16 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid a negative diff
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)

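A small worked example of the clamping above, assuming the reporter starts with a zero lastReportedProgressPercent: with lowDAAScore 1000 and a highDAAScore hint of 2000, reaching a processed DAA score of 2500 bumps highDAAScore to 2501, so the reported progress is int(1500/1501*100) = 99 rather than a value above 100. As in-package usage (a sketch):

// Sketch only: the high DAA score is just a hint; the reporter clamps the percentage.
reporter := newIBDProgressReporter(1000, 2000, "headers")
reporter.reportProgress(600, 1600) // relative 600 of 1000: logs 60%
reporter.reportProgress(900, 2500) // past the hint: logs 99%, never exceeding 100%
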
View File

@@ -24,6 +24,7 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
return err
}
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
@@ -32,6 +33,8 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
return err
}
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
@@ -344,6 +347,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
return false, err
}

View File

@@ -2,6 +2,8 @@ package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -61,6 +63,9 @@ func (flow *sendPingsFlow) start() error {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
if errors.Is(err, router.ErrTimeout) {
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
}
return err
}
pongMessage := message.(*appmessage.MsgPong)

View File

@@ -22,6 +22,7 @@ type TransactionsRelayContext interface {
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
@@ -49,6 +50,10 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err
}
if flow.IsIBDRunning() {
continue
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err

View File

@@ -47,6 +47,10 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {

View File

@@ -0,0 +1,39 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// ReceiveAddresses asks a peer for more addresses if needed.
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
subnetworkID := peer.SubnetworkID()
msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
err := outgoingRoute.Enqueue(msgGetAddresses)
if err != nil {
return err
}
message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
}

View File

@@ -0,0 +1,52 @@
package addressexchange
import (
"math/rand"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
type SendAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))
err = outgoingRoute.Enqueue(msgAddresses)
if err != nil {
return err
}
}
}
// shuffleAddresses randomizes the given addresses sent if there are more than the maximum allowed in one message.
func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
addressCount := len(addresses)
if addressCount < appmessage.MaxAddressesPerMsg {
return addresses
}
shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
copy(shuffleAddresses, addresses)
rand.Shuffle(addressCount, func(i, j int) {
shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
})
// Truncate it to the maximum size.
shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
return shuffleAddresses
}

View File

@@ -0,0 +1,16 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
if ibdBatchSize >= router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}

View File

@@ -0,0 +1,33 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
}
}

View File

@@ -0,0 +1,86 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
type HandleIBDBlockLocatorContext interface {
Domain() domain.Domain
}
// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
// the highest known block that's in the selected parent chain of `targetHash` to the
// requesting peer.
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)
targetHash := ibdBlockLocatorMessage.TargetHash
log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
if err != nil {
return err
}
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}
foundHighestHashInTheSelectedParentChainOfTargetHash := false
for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return err
}
// The IBD block locator is checking only existing blocks with bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue
}
isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
if err != nil {
return err
}
if !isBlockLocatorHashInSelectedParentChainOfHighHash {
continue
}
foundHighestHashInTheSelectedParentChainOfTargetHash = true
log.Debugf("Found a known hash %s amongst peer %s's "+
"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)
ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
if err != nil {
return err
}
break
}
if !foundHighestHashInTheSelectedParentChainOfTargetHash {
log.Warnf("no hash was found in the blockLocator "+
"that was in the selected parent chain of targetHash %s", targetHash)
ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
if err != nil {
return err
}
}
}
}

View File

@@ -0,0 +1,54 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
type HandleIBDBlockRequestsContext interface {
Domain() domain.Domain
}
// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block)
ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
err = outgoingRoute.Enqueue(ibdBlockMessage)
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
}
}
}

View File

@@ -0,0 +1,85 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
Domain() domain.Domain
}
type handleRequestIBDChainBlockLocatorFlow struct {
RequestIBDChainBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestIBDChainBlockLocatorFlow{
RequestIBDChainBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
for {
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
var locator externalapi.BlockLocator
if highHash == nil || lowHash == nil {
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
} else {
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
// The chain has been modified, signal it by sending an empty locator
locator, err = externalapi.BlockLocator{}, nil
}
}
if err != nil {
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between %s and %s", lowHash, highHash)
}
err = flow.sendIBDChainBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}
func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,160 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
Config() *config.Config
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
log.Criticalf("Pruning point anticone size is %d", len(pruningPointHeaders))
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
for _, blockHash := range pointAndItsAnticone {
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
if err != nil {
return err
}
trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
for i, daaBlockHash := range blockDAAWindowHashes {
index, exists := daaWindowHashesToIndex[*daaBlockHash]
if !exists {
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
if err != nil {
return err
}
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
index = len(daaWindowBlocks) - 1
daaWindowHashesToIndex[*daaBlockHash] = index
}
trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
}
ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
if err != nil {
return err
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
if !exists {
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
if err != nil {
return err
}
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
Hash: ghostdagDataBlockHash,
GHOSTDAGData: data,
})
index = len(ghostdagData) - 1
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
}
}
err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
if err != nil {
return err
}
for i, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
if (i+1)%ibdBatchSize == 0 {
// No timeout here, as we don't care if the syncee takes its time computing,
// since it only blocks this dedicated flow
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
}
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
}
}

View File

@@ -0,0 +1,40 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
Domain() domain.Domain
}
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point proof from %s", peer)
pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
if err != nil {
return err
}
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
err = outgoingRoute.Enqueue(pruningPointProofMessage)
if err != nil {
return err
}
log.Debugf("Sent pruning point proof to %s", peer)
}
}

View File

@@ -0,0 +1,53 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
Domain() domain.Domain
}
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
if err != nil {
return err
}
log.Debugf("Relayed block with hash %s", hash)
}
}
}

View File

@@ -0,0 +1,416 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// orphanResolutionRange is the maximum amount of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details
var orphanResolutionRange uint32 = 5
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
IsRecoverableError(err error) bool
}
type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []*appmessage.MsgInvRelayBlock
}
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleRelayInvsFlow{
RelayInvsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
close(peer.IBDRequestChannel())
return err
}
func (flow *handleRelayInvsFlow) start() error {
for {
log.Debugf("Waiting for inv")
inv, err := flow.readInv()
if err != nil {
return err
}
log.Debugf("Got relay inv for block %s", inv.Hash)
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
if err != nil {
return err
}
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
if blockInfo.BlockStatus == externalapi.StatusInvalid {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
log.Debugf("Block %s already exists. continuing...", inv.Hash)
continue
}
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if flow.IsOrphan(inv.Hash) {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", inv.Hash)
continue
}
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil {
return err
}
continue
}
// Block relay is disabled during IBD
if flow.IsIBDRunning() {
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
}
log.Debugf("Requesting block %s", inv.Hash)
block, exists, err := flow.requestBlock(inv.Hash)
if err != nil {
return err
}
if exists {
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
continue
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
continue
}
log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
continue
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Infof("Ignoring duplicate block %s", inv.Hash)
continue
}
return err
}
if len(missingParents) > 0 {
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
err := flow.processOrphan(block)
if err != nil {
return err
}
continue
}
oldVirtualParents := hashset.New()
for _, parent := range oldVirtualInfo.ParentHashes {
oldVirtualParents.Add(parent)
}
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}
block, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
}
}
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
if exists {
return nil, true, nil
}
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().Remove(requestHash)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
if err != nil {
return nil, false, err
}
msgBlock, err := flow.readMsgBlock()
if err != nil {
return nil, false, err
}
block := appmessage.MsgBlockToDomainBlock(msgBlock)
blockHash := consensushashing.BlockHash(block)
if !blockHash.Equal(requestHash) {
return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
}
return block, false, nil
}
// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
//
// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock:
return message, nil
default:
return nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
blockHash := consensushashing.BlockHash(block)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
}
missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil, nil
}
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, virtualChangeSet, nil
}
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)
// Return if the block has been orphaned from elsewhere already
if flow.IsOrphan(blockHash) {
log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
return nil
}
// Add the block to the orphan set if it's within orphan resolution range
isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
if err != nil {
return err
}
if isBlockInOrphanResolutionRange {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", blockHash)
return nil
}
}
log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set", blockHash)
flow.AddOrphan(block)
log.Debugf("Requesting block %s missing ancestors", blockHash)
return flow.AddOrphanRootsToQueue(blockHash)
}
// Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash)
// Send the block to IBD flow via the IBDRequestChannel.
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
select {
case flow.peer.IBDRequestChannel() <- block:
default:
}
return nil
}
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
parents := block.Header.DirectParents()
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
}
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
if err != nil {
return false, err
}
blockLocatorHashes, err := flow.receiveBlockLocator()
if err != nil {
return false, err
}
for _, blockLocatorHash := range blockLocatorHashes {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return false, err
}
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
return true, nil
}
}
return false, nil
}
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
if err != nil {
return err
}
if !orphanExists {
log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}
if len(orphanRoots) == 0 {
// In some rare cases we get here when there are no orphan roots already
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = appmessage.NewMsgInvBlock(root)
}
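// Prepend the roots so that the missing ancestors are requested before any other pending invs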
flow.invsQueue = append(invMessages, flow.invsQueue...)
return nil
}


@@ -0,0 +1,95 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sort"
)
// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestAnticoneContext interface {
Domain() domain.Domain
Config() *config.Config
}
type handleRequestAnticoneFlow struct {
RequestAnticoneContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestAnticoneFlow{
RequestAnticoneContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestAnticoneFlow) start() error {
for {
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected by the past of the relayed block, and is thus expected to be bounded by the mergeset limit since
// we relay blocks only if they enter the virtual's mergeset. We add 2 as a small margin of error.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone")
}
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
// We sort the headers in bottom-up topological order before sending
sort.Slice(blockHeaders, func(i, j int) bool {
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
})
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
contextHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}


@@ -0,0 +1,75 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
Domain() domain.Domain
}
type handleRequestBlockLocatorFlow struct {
RequestBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestBlockLocatorFlow{
RequestBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
highHash, limit, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
if err != nil || len(locator) == 0 {
if err != nil {
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
}
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between the pruning point and %s", highHash)
}
err = flow.sendBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, 0, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
if err != nil {
return err
}
return nil
}


@@ -0,0 +1,116 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// This constant must be identical on both the syncer and the syncee. Therefore, never (!!) change it unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99
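// Illustrative sketch only (not part of this changeset): the test referenced above presumably asserts an
// invariant of roughly this shape, where routeCapacity is a hypothetical stand-in for whatever buffer size
// the router package actually uses for a route:
//
//	if ibdBatchSize >= routeCapacity {
//		t.Fatalf("a full batch of %d messages must fit inside a route buffer of %d messages",
//			ibdBatchSize, routeCapacity)
//	}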
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
Domain() domain.Domain
}
type handleRequestHeadersFlow struct {
RequestHeadersContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestHeadersFlow{
RequestHeadersContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestHeadersFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
if err != nil {
return err
}
if !isLowSelectedAncestorOfHigh {
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
lowHash, highHash)
}
for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}
log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
}
// The next lowHash is the last element in blockHashes
lowHash = blockHashes[len(blockHashes)-1]
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}


@@ -0,0 +1,140 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
Domain() domain.Domain
}
type handleRequestPruningPointUTXOSetFlow struct {
HandleRequestPruningPointUTXOSetContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestPruningPointUTXOSetFlow{
HandleRequestPruningPointUTXOSetContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
for {
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
if err != nil {
return err
}
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
if err != nil {
return err
}
}
}
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
defer onEnd()
log.Debugf("Got request for pruning point UTXO set")
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
*appmessage.MsgRequestPruningPointUTXOSet, error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
}
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
// Any other error while fetching the UTXO set aborts the flow instead of being silently swallowed
return err
}
log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}
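// Illustrative arithmetic (not part of this changeset): with step = 1000 UTXOs per chunk and
// ibdBatchSize = 99 chunks per flow-control batch, the syncer can push up to 99,000 UTXOs before it must
// wait for a RequestNextPruningPointUTXOSetChunk from the syncee. For example, a UTXO set of 250,500
// entries is sent as 251 chunks and requires two such round trips (after chunks 99 and 198).
// maxUTXOsPerFlowControlWindow is a hypothetical helper, added purely to spell out that bound.
func maxUTXOsPerFlowControlWindow() int {
    const step = 1000          // mirrors the constant in sendPruningPointUTXOSet above
    return step * ibdBatchSize // 99,000 UTXOs between flow-control round trips
}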


@@ -0,0 +1,723 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
"time"
)
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}
type handleIBDFlow struct {
IBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
IBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
// Wait for IBD requests triggered by other flows
block, ok := <-flow.peer.IBDRequestChannel()
if !ok {
return nil
}
err := flow.runIBDIfNotRunning(block)
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
isFinishedSuccessfully := false
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
}()
relayBlockHash := consensushashing.BlockHash(block)
log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
log.Debugf("Syncing blocks up to %s", relayBlockHash)
log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
if err != nil {
return err
}
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
block, highestKnownSyncerChainHash)
if err != nil {
return err
}
if !shouldSync {
return nil
}
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}
} else {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", relayBlockHash)
return nil
}
}
err = flow.syncPruningPointFutureHeaders(
flow.Domain().Consensus(),
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}
}
// We start by syncing missing bodies over the syncer selected chain
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
if err != nil {
return err
}
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
// The relay block might be in the anticone of the syncer selected tip, so
// check its chain for missing bodies as well.
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
// is in syncer virtual mergeset which has bounded size.
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
err = flow.syncMissingBlockBodies(relayBlockHash)
if err != nil {
return err
}
}
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
isFinishedSuccessfully = true
return nil
}
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
/*
Algorithm:
Request full selected chain block locator from syncer
Find the highest block which we know
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
*/
// Empty hashes indicate that the full chain is queried
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
syncerHeaderSelectedTipHash := locatorHashes[0]
var highestKnownSyncerChainHash *externalapi.DomainHash
chainNegotiationRestartCounter := 0
chainNegotiationZoomCounts := 0
initialLocatorLen := len(locatorHashes)
for {
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
for _, syncerChainHash := range locatorHashes {
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
if err != nil {
return nil, nil, err
}
if info.Exists {
currentHighestKnownSyncerChainHash = syncerChainHash
break
}
lowestUnknownSyncerChainHash = syncerChainHash
}
// No unknown blocks, break. Note this can only happen in the first iteration
if lowestUnknownSyncerChainHash == nil {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// No shared block, break
if currentHighestKnownSyncerChainHash == nil {
highestKnownSyncerChainHash = nil
break
}
// No point in zooming further
if len(locatorHashes) == 1 {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// Zoom in
locatorHashes, err = flow.getSyncerChainBlockLocator(
lowestUnknownSyncerChainHash,
currentHighestKnownSyncerChainHash, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) > 0 {
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
"hashes to match the locator bounds")
}
chainNegotiationZoomCounts++
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
if len(locatorHashes) == 2 {
// We found our search target
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
if chainNegotiationZoomCounts > initialLocatorLen*2 {
// Since the zoom-in always queries two consecutive entries in the previous locator, the locator
// is expected to decrease in size at least every two iterations
return nil, nil, protocolerrors.Errorf(true,
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
chainNegotiationZoomCounts, initialLocatorLen)
}
} else { // Empty locator signals a restart due to chain changes
chainNegotiationZoomCounts = 0
chainNegotiationRestartCounter++
if chainNegotiationRestartCounter > 32 {
return nil, nil, protocolerrors.Errorf(false,
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
}
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
// An empty locator signals that the syncer chain was modified and no longer contains one of
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
initialLocatorLen = len(locatorHashes)
// Reset syncer's header selected tip
syncerHeaderSelectedTipHash = locatorHashes[0]
}
}
log.Debugf("Found highest known syncer chain block %s from peer %s",
highestKnownSyncerChainHash, flow.peer)
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}
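// Illustrative sketch only (not part of this changeset, and not necessarily how the consensus layer builds
// its locators): a chain block locator typically samples hashes at exponentially growing distances from the
// high end, so its length is roughly logarithmic in the covered chain segment. That is why the zoom-in loop
// above can bound its total number of steps by 2*initialLocatorLen: each zoom-in narrows the search to the
// gap between two consecutive entries of the previous locator.
func illustrativeLocatorIndices(segmentLength int) []int {
    indices := []int{0} // distance 0 = the high hash itself
    for step := 1; step < segmentLength-1; step *= 2 {
        indices = append(indices, step)
    }
    if segmentLength > 1 {
        indices = append(indices, segmentLength-1) // always include the low end
    }
    return indices
}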
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = "(interrupted)"
}
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
}
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgIBDChainBlockLocator:
if len(message.BlockLocatorHashes) > 64 {
return nil, protocolerrors.Errorf(true,
"Got block locator of size %d>64 while expecting locator to have size "+
"which is logarithmic in DAG size (which should never exceed 2^64)",
len(message.BlockLocatorHashes))
}
return message.BlockLocatorHashes, nil
default:
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
}
}
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScoreHint uint64) error {
log.Infof("Downloading headers from %s", flow.peer)
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
// No need to get syncer selected tip headers, so sync relay past and return
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
}
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
if err != nil {
return err
}
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")
// Keep a short queue of BlockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
errChan <- err
return
}
if doneIBD {
close(blockHeadersMessageChan)
return
}
if len(blockHeadersMessage.BlockHeaders) == 0 {
// The syncer should have sent a done message if the search completed, and not an empty list
errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
return
}
blockHeadersMessageChan <- blockHeadersMessage
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
if err != nil {
errChan <- err
return
}
}
})
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
if !ok {
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
}
for _, header := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
case err := <-errChan:
return err
}
}
}
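// Minimal, generic sketch (not part of this changeset) of the pipelining pattern used in
// syncPruningPointFutureHeaders above: a receiving goroutine keeps a small buffered channel filled, so
// header validation never idles while the next network message is in flight. Error handling is omitted
// here; the real flow forwards errors over a dedicated channel.
func pipelineSketch(receive func() (item int, done bool), process func(item int)) {
    items := make(chan int, 2) // a small buffer is enough to keep both sides busy
    go func() {
        for {
            item, done := receive()
            if done {
                close(items)
                return
            }
            items <- item
        }
    }()
    for item := range items {
        process(item)
    }
}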
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
// Finished downloading syncer selected tip blocks,
// check if we already have the triggering relayBlockHash
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
// Send a special header request for the selected tip anticone. This is expected to
// be a small set, as it is bounded by the size of the virtual's mergeset.
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
if err != nil {
return err
}
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
if err != nil {
return err
}
if anticoneDone {
return protocolerrors.Errorf(true,
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
relayBlockHash, syncerHeaderSelectedTipHash)
}
_, anticoneDone, err = flow.receiveHeaders()
if err != nil {
return err
}
if !anticoneDone {
return protocolerrors.Errorf(true,
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
relayBlockHash, syncerHeaderSelectedTipHash)
}
for _, header := range anticoneHeadersMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
}
// If the relayBlockHash has still not been received, the peer is misbehaving
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
}
return nil
}
func (flow *handleIBDFlow) sendRequestAnticone(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
}
func (flow *handleIBDFlow) sendRequestHeaders(
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.BlockHeadersMessage:
return message, false, nil
case *appmessage.MsgDoneHeaders:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdBlockHeaders,
appmessage.CmdDoneHeaders,
message.Command())
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
Transactions: nil,
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
if err != nil {
return err
}
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
if err != nil {
return err
}
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return err
}
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
if err != nil {
return err
}
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
"tip is smaller than the current selected tip")
}
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
return protocolerrors.Errorf(false, "difference between the timestamps of "+
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
}
return nil
}
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
if err != nil {
return false, err
}
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
)
}
}
}
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
}
if len(hashes) == 0 {
// Blocks can be inserted into the DAG during IBD if they were requested before IBD started.
// In rare cases, all the IBD blocks might already be inserted by the time we reach this point.
// In these cases, GetMissingBlockBodyHashes would return an empty array.
log.Debugf("No missing block body hashes found.")
return nil
}
lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
if err != nil {
return err
}
highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore()
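// Request the missing bodies in batches of ibdBatchSize hashes; see the comment on that constant
// for why the batch size is bounded by the route capacity.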
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
hashesToRequest = hashes[offset : offset+ibdBatchSize]
} else {
hashesToRequest = hashes[offset:]
}
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
if err != nil {
return err
}
for _, expectedHash := range hashesToRequest {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
blockHash := consensushashing.BlockHash(block)
if !expectedHash.Equal(blockHash) {
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
continue
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
highestProcessedDAAScore = block.Header.DAAScore()
}
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
}
return flow.resolveVirtual(highestProcessedDAAScore)
}
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int
// Note: these are uint64 values, so compare directly rather than subtracting (which could underflow)
if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
if err != nil {
return err
}
err = flow.OnVirtualChange(virtualChangeSet)
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}
}


@@ -0,0 +1,45 @@
package blockrelay
type ibdProgressReporter struct {
lowDAAScore uint64
highDAAScore uint64
objectName string
totalDAAScoreDifference uint64
lastReportedProgressPercent int
processed int
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero or negative diff
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
objectName: objectName,
totalDAAScoreDifference: highDAAScore - lowDAAScore,
lastReportedProgressPercent: 0,
processed: 0,
}
}
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid a negative diff
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
ipr.lastReportedProgressPercent = progressPercent
}
}
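// Hypothetical usage sketch (not part of this changeset), showing how the reporter keeps the reported
// percentage sane even when the original highDAAScore was only a hint and gets exceeded:
//
//	reporter := newIBDProgressReporter(1_000, 2_000, "block headers")
//	reporter.reportProgress(500, 1_500) // logs: IBD: Processed 500 block headers (50%)
//	reporter.reportProgress(700, 2_500) // hint exceeded; the high bound is bumped and the report stays at 99%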


@@ -0,0 +1,428 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
"time"
)
func (flow *handleIBDFlow) ibdWithHeadersProof(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensus()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
}
return err
}
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
relayBlock *externalapi.DomainBlock,
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
if highestKnownSyncerChainHash != nil {
highestSharedBlockFound = true
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return false, false, err
}
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
pruningPoint, highestKnownSyncerChainHash)
if err != nil {
return false, false, err
}
}
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
// we might have information here that is relevant to finality conflict decisions. This should be taken into
// account when we improve this aspect.
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
if err != nil {
return false, false, err
}
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
return true, true, nil
}
return false, false, nil
}
return false, true, nil
}
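// checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore returns true only when the
// relay block's blue score is at least a full pruning depth above the current headers selected tip and its
// blue work exceeds the selected tip's blue work; the caller uses this to decide whether a headers-proof
// IBD is justified.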
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return false, err
}
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil {
return false, err
}
if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil
}
return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
log.Infof("Downloading the pruning point proof from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
if err != nil {
return nil, err
}
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
}
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
}
return nil, err
}
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
if err != nil {
return nil, err
}
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScore uint64) error {
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
if err != nil {
return err
}
err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
if err != nil {
return err
}
// TODO: Remove this condition once there's more proper way to check finality violation
// in the headers proof.
if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
if err != nil {
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
}
err = flow.validatePruningPointFutureHeaderTimestamps()
if err != nil {
return err
}
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
return nil
}
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
if err != nil {
return err
}
err = flow.validateAndInsertPruningPoints(proofPruningPoint)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
}
pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
}
if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
if err != nil {
return err
}
i := 0
for ; ; i++ {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
break
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
if err != nil {
return err
}
// We check i+2 rather than i+1 because the pruning point itself was downloaded before this loop,
// so by the end of iteration i the syncee has received i+2 blocks in total.
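// For example, with ibdBatchSize = 99: at i = 97 the syncee has received the pruning point plus
// anticone blocks 0..97, i.e. 99 blocks in total, so it requests the next batch.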
if (i+2)%ibdBatchSize == 0 {
log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
if err != nil {
return err
}
}
}
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
return nil
}
func (flow *handleIBDFlow) processBlockWithTrustedData(
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
blockWithTrustedData := &externalapi.BlockWithTrustedData{
Block: appmessage.MsgBlockToDomainBlock(block.Block),
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
}
for _, index := range block.DAAWindowIndices {
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
}
for _, index := range block.GHOSTDAGDataIndices {
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
return err
}
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch downCastedMessage := message.(type) {
case *appmessage.MsgBlockWithTrustedDataV4:
return downCastedMessage, false, nil
case *appmessage.MsgDoneBlocksWithTrustedData:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
(&appmessage.MsgBlockWithTrustedData{}).Command(),
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
downCastedMessage.Command())
}
}
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
}
return msgPruningPoints, nil
}
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
if currentPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
}
pruningPoints, err := flow.receivePruningPoints()
if err != nil {
return err
}
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
for i, header := range pruningPoints.Headers {
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
}
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
if err != nil {
return err
}
if arePruningPointsViolatingFinality {
// TODO: Find a better way to deal with finality conflicts.
return protocolerrors.Errorf(false, "pruning points are violating finality")
}
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
if !lastPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
"point in the list")
}
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
pruningPoint *externalapi.DomainHash) (bool, error) {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
if err != nil {
return false, err
}
if !isValid {
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
}
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
return false, err
}
if !isSuccessful {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
return true, nil
}


@@ -0,0 +1,9 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)


@@ -0,0 +1,35 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
Domain() domain.Domain
Config() *config.Config
}
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return err
}
if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
return nil
}
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
return outgoingRoute.Enqueue(virtualSelectedParentInv)
}
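For orientation: the MsgInvBlock sent here is an ordinary CmdInvRelayBlock inv, so on the receiving peer it is handled by the HandleRelayInvs flow registered for that command later in this diff.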

View File

@@ -0,0 +1,42 @@
package ping
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
type ReceivePingsContext interface {
}
type receivePingsFlow struct {
ReceivePingsContext
incomingRoute, outgoingRoute *router.Route
}
// ReceivePings handles all ping messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgPing.
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &receivePingsFlow{
ReceivePingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *receivePingsFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
pingMessage := message.(*appmessage.MsgPing)
pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
err = flow.outgoingRoute.Enqueue(pongMessage)
if err != nil {
return err
}
}
}
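A minimal sketch of exercising the ReceivePings flow above with in-memory routes, following the same pattern as the flow tests added elsewhere in this diff. The test name, route names and the nonce value are arbitrary, and since ReceivePingsContext is an empty interface a nil context is sufficient.
package ping_test

import (
    "testing"
    "time"

    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

func TestReceivePingsSketch(t *testing.T) {
    incomingRoute := router.NewRoute("incoming")
    outgoingRoute := router.NewRoute("outgoing")
    defer incomingRoute.Close()
    defer outgoingRoute.Close()

    // ReceivePings runs until one of the routes is closed.
    go func() { _ = ping.ReceivePings(nil, incomingRoute, outgoingRoute) }()

    if err := incomingRoute.Enqueue(appmessage.NewMsgPing(42)); err != nil {
        t.Fatalf("Enqueue: %+v", err)
    }
    message, err := outgoingRoute.DequeueWithTimeout(time.Second)
    if err != nil {
        t.Fatalf("DequeueWithTimeout: %+v", err)
    }
    if pong := message.(*appmessage.MsgPong); pong.Nonce != 42 {
        t.Fatalf("expected pong nonce 42, got %d", pong.Nonce)
    }
}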

View File

@@ -0,0 +1,77 @@
package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/random"
)
// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
ShutdownChan() <-chan struct{}
}
type sendPingsFlow struct {
SendPingsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendPings starts sending MsgPings to the given peer once every
// pingInterval.
// This function assumes that incomingRoute will only return MsgPong.
func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendPingsFlow{
SendPingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *sendPingsFlow) start() error {
const pingInterval = 2 * time.Minute
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()
for {
select {
case <-flow.ShutdownChan():
return nil
case <-ticker.C:
}
nonce, err := random.Uint64()
if err != nil {
return err
}
flow.peer.SetPingPending(nonce)
pingMessage := appmessage.NewMsgPing(nonce)
err = flow.outgoingRoute.Enqueue(pingMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
if errors.Is(err, router.ErrTimeout) {
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
}
return err
}
pongMessage := message.(*appmessage.MsgPong)
if pongMessage.Nonce != pingMessage.Nonce {
return protocolerrors.New(true, "nonce mismatch between ping and pong")
}
flow.peer.SetPingIdle()
}
}

View File

@@ -0,0 +1,209 @@
package v5
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type protocolManager interface {
RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
Context() *flowcontext.FlowContext
}
// Register is used in order to register all the protocol flows to the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
flows = registerAddressFlows(m, router, isStopping, errChan)
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
return flows
}
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
}),
m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
appmessage.CmdPruningPointProof,
appmessage.CmdTrustedData,
appmessage.CmdIBDChainBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBD(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestAnticone", router,
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandlePruningPointProofRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("HandleRejects", router,
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
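These v5 flow registrations are selected per peer by the protocol manager's routerInitializer, shown further down in this diff, which switches on the negotiated protocol version (4 or 5).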

View File

@@ -0,0 +1,37 @@
package rejects
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRejectsContext is the interface for the context needed for the HandleRejects flow.
type HandleRejectsContext interface {
}
type handleRejectsFlow struct {
HandleRejectsContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRejects handles all reject messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgReject.
func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRejectsFlow{
HandleRejectsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRejectsFlow) start() error {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
rejectMessage := message.(*appmessage.MsgReject)
return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
}

View File

@@ -0,0 +1,24 @@
package testing
import (
"strings"
"testing"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/pkg/errors"
)
func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
pErr := protocolerrors.ProtocolError{}
if errors.As(err, &pErr) != isProtocolError {
t.Fatalf("Unexepcted error %+v", err)
}
if pErr.ShouldBan != shouldBan {
t.Fatalf("Exepcted shouldBan %t but got %t", shouldBan, pErr.ShouldBan)
}
if !strings.Contains(err.Error(), contains) {
t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err)
}
}

View File

@@ -0,0 +1,51 @@
package testing
import (
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
"testing"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type fakeReceiveAddressesContext struct{}
func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager {
return nil
}
func TestReceiveAddressesErrors(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
peer := peerpkg.New(nil)
errChan := make(chan error)
go func() {
errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer)
}()
_, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
// Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban
err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress,
addressmanager.GetAddressesMax+1)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
select {
case err := <-errChan:
checkFlowError(t, err, true, true, "address count exceeded")
case <-time.After(time.Second):
t.Fatalf("timed out after %s", time.Second)
}
})
}

View File

@@ -0,0 +1,4 @@
package testing
// Because of a bug in Go, coverage fails for packages that contain only test files. See https://github.com/golang/go/issues/27333
// So this is a dummy non-test go file in the package.

View File

@@ -0,0 +1,209 @@
package transactionrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// TransactionsRelayContext is the interface for the context needed for the
// HandleRelayedTransactions and HandleRequestedTransactions flows.
type TransactionsRelayContext interface {
NetAdapter() *netadapter.NetAdapter
Domain() domain.Domain
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
invsQueue []*appmessage.MsgInvTransaction
}
// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they
// are missing, adds them to the mempool and propagates them to the rest of the network.
func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRelayedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
invsQueue: make([]*appmessage.MsgInvTransaction, 0),
}
return flow.start()
}
func (flow *handleRelayedTransactionsFlow) start() error {
for {
inv, err := flow.readInv()
if err != nil {
return err
}
if flow.IsIBDRunning() {
continue
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err
}
err = flow.receiveTransactions(requestedIDs)
if err != nil {
return err
}
}
}
func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
inv *appmessage.MsgInvTransaction) (requestedIDs []*externalapi.DomainTransactionID, err error) {
idsToRequest := make([]*externalapi.DomainTransactionID, 0, len(inv.TxIDs))
for _, txID := range inv.TxIDs {
if flow.isKnownTransaction(txID) {
continue
}
exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
if exists {
continue
}
idsToRequest = append(idsToRequest, txID)
}
if len(idsToRequest) == 0 {
return idsToRequest, nil
}
msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
err = flow.outgoingRoute.Enqueue(msgGetTransactions)
if err != nil {
flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
return nil, err
}
return idsToRequest, nil
}
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
return true
}
return false
}
func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvTransaction
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvTransaction)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay flow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error {
return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs)
}
// readMsgTxOrNotFound returns the next MsgTx or MsgTransactionNotFound in incomingRoute,
// returning only one of the message types at a time, and populates invsQueue with any
// inv messages that arrive in the meantime.
func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvTransaction:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgTx:
return message, nil, nil
case *appmessage.MsgTransactionNotFound:
return nil, message, nil
default:
return nil, nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
// clean from any pending transactions.
defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
for _, expectedID := range requestedTransactions {
msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
if err != nil {
return err
}
if msgTxNotFound != nil {
if !msgTxNotFound.ID.Equal(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, msgTxNotFound.ID)
}
continue
}
tx := appmessage.MsgTxToDomainTransaction(msgTx)
txID := consensushashing.TransactionID(tx)
if !txID.Equal(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, txID)
}
acceptedTransactions, err :=
flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true)
if err != nil {
ruleErr := &mempool.RuleError{}
if !errors.As(err, ruleErr) {
return errors.Wrapf(err, "failed to process transaction %s", txID)
}
shouldBan := false
if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
if txRuleErr.RejectCode == mempool.RejectInvalid {
shouldBan = true
}
}
if !shouldBan {
continue
}
return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
}
err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions))
if err != nil {
return err
}
flow.OnTransactionAddedToMempool()
}
return nil
}
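To make the ban policy in receiveTransactions above easier to follow, here is a self-contained sketch with a hypothetical classify helper (not part of the kaspad codebase) that mirrors the same decision: non-rule errors are fatal for the flow, rule errors are silently ignored, except transaction rule errors with RejectInvalid, which get the peer banned.
package transactionrelay

import (
    "github.com/kaspanet/kaspad/domain/miningmanager/mempool"
    "github.com/pkg/errors"
)

// classifyMempoolError maps an error returned by ValidateAndInsertTransaction
// to the decision taken by receiveTransactions.
func classifyMempoolError(err error) (ignore, ban, fatal bool) {
    ruleErr := &mempool.RuleError{}
    if !errors.As(err, ruleErr) {
        // Not a mempool rule error: abort the flow.
        return false, false, true
    }
    if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) &&
        txRuleErr.RejectCode == mempool.RejectInvalid {
        // The transaction itself is invalid: ban the relaying peer.
        return false, true, false
    }
    // Any other rule error: drop the transaction and keep going.
    return true, false, false
}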

View File

@@ -0,0 +1,196 @@
package transactionrelay_test
import (
"errors"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
"strings"
"testing"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type mocTransactionsRelayContext struct {
netAdapter *netadapter.NetAdapter
domain domain.Domain
sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
return m.netAdapter
}
func (m *mocTransactionsRelayContext) Domain() domain.Domain {
return m.domain
}
func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
return m.sharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
return nil
}
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
defer incomingRoute.Close()
peerIncomingRoute := router.NewRoute("outgoing")
defer peerIncomingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
invMessage := appmessage.NewMsgInvTransaction(txIDs)
err = incomingRoute.Enqueue(invMessage)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsRequest", func() {
msg, err := peerIncomingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %v", err)
}
inv := msg.(*appmessage.MsgRequestTransactions)
if len(txIDs) != len(inv.IDs) {
t.Fatalf("TestHandleRelayedTransactions: expected %d transactions ID, but got %d", len(txIDs), len(inv.IDs))
}
for i, id := range inv.IDs {
if txIDs[i].String() != id.String() {
t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
}
err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i]))
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
}
// Insert an unexpected message type to stop the infinite loop.
err = incomingRoute.Enqueue(&appmessage.MsgAddresses{})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
})
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute)
// Since we inserted an unexpected message type to stop the infinite loop,
// we expect the error to originate from that specific message and to be
// reported as a protocol error.
if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) {
t.Fatalf("Expected to protocol error")
} else {
if !protocolErr.ShouldBan {
t.Fatalf("Exepcted shouldBan true, but got false.")
}
if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") {
t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+
"in the block relay flow, but got: %v", protocolErr.Cause)
}
}
})
}
// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when
// trying to dequeue a message from a closed route.
func TestOnClosedIncomingRoute(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to creat a NetAdapter : %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txIDs := []*externalapi.DomainTransactionID{txID}
err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
incomingRoute.Close()
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute)
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}

View File

@@ -0,0 +1,59 @@
package transactionrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type handleRequestedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested
// transactions if they are in the mempool.
// Missing transactions are ignored.
func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestedTransactionsFlow) start() error {
for {
msgRequestTransactions, err := flow.readRequestTransactions()
if err != nil {
return err
}
for _, transactionID := range msgRequestTransactions.IDs {
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
err := flow.outgoingRoute.Enqueue(msgTransactionNotFound)
if err != nil {
return err
}
continue
}
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
if err != nil {
return err
}
}
}
}
func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) {
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
return msg.(*appmessage.MsgRequestTransactions), nil
}

View File

@@ -0,0 +1,91 @@
package transactionrelay_test
import (
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
"testing"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions
// when the requested transactions aren't found in the mempool.
func TestHandleRequestedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test Consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain Instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
msg := appmessage.NewMsgRequestTransactions(txIDs)
err = incomingRoute.Enqueue(msg)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsMessages", func() {
for i, id := range txIDs {
msg, err := outgoingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %s", err)
}
outMsg := msg.(*appmessage.MsgTransactionNotFound)
if id.String() != outMsg.ID.String() {
t.Fatalf("TestHandleRequestedTransactionsNotFound: expected equal txID: expected %s, but got %s", txIDs[i].String(), outMsg.ID.String())
}
}
// Close the incomingRoute to stop the infinite loop.
incomingRoute.Close()
})
err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute)
// Make sure the error is due to the closed route.
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
v5 "github.com/kaspanet/kaspad/app/protocol/flows/v5"
"sync"
"sync/atomic"
@@ -78,6 +79,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
switch peer.ProtocolVersion() {
case 4:
flows = v4.Register(m, router, errChan, &isStopping)
case 5:
flows = v5.Register(m, router, errChan, &isStopping)
default:
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
}

View File

@@ -56,21 +56,29 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
"invalid block")
}
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
if err != nil {
return err
}
isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
if err != nil {
return err
}
block.VerboseData = &appmessage.RPCBlockVerboseData{
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds),
IsChainBlock: isChainBlock,
}
// selectedParentHash will be nil in the genesis block
if selectedParentHash != nil {
block.VerboseData.SelectedParentHash = selectedParentHash.String()
if blockInfo.SelectedParent != nil {
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
}
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {

View File

@@ -7,6 +7,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/version"
)
// HandleGetBlockTemplate handles the respectively named RPC command
@@ -25,7 +26,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
return nil, err
}
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version())}
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
if err != nil {

View File

@@ -1,3 +1,31 @@
Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)
Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non quadratic also in the worst-case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Cleanup log output mistakes and try to be more clear to the user (#1976, #1978)
* Apply avoiding IBD logic from patch10 to p2p v4 IBD handling (#1979)
Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)

View File

@@ -3,6 +3,7 @@ package main
import (
"reflect"
"strconv"
"strings"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
@@ -149,12 +150,24 @@ func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflec
value = pointer.Interface()
case reflect.Slice:
sliceType := parameterDesc.typeof.Elem()
if sliceType.Kind() != reflect.String {
return reflect.Value{},
errors.Errorf("Unsupported slice type '%s' for parameter '%s'",
sliceType,
parameterDesc.name)
}
if valueStr == "" {
value = []string{}
} else {
value = strings.Split(valueStr, ",")
}
// Int and uint are not supported because their size is platform-dependent
case reflect.Int,
reflect.Uint,
// Other types are not supported simply because they are not used in any command right now
// but support can be added if and when needed
reflect.Slice,
reflect.Func,
reflect.Interface,
reflect.Map,

View File

@@ -15,6 +15,7 @@ const (
createUnsignedTransactionSubCmd = "create-unsigned-transaction"
signSubCmd = "sign"
broadcastSubCmd = "broadcast"
parseSubCmd = "parse"
showAddressesSubCmd = "show-addresses"
newAddressSubCmd = "new-address"
dumpUnencryptedDataSubCmd = "dump-unencrypted-data"
@@ -79,6 +80,13 @@ type broadcastConfig struct {
config.NetworkFlags
}
type parseConfig struct {
Transaction string `long:"transaction" short:"t" description:"The transaction to parse (encoded in hex)"`
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the transaction to parse (encoded in hex)"`
Verbose bool `long:"verbose" short:"v" description:"Verbose: show transaction inputs"`
config.NetworkFlags
}
type showAddressesConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
config.NetworkFlags
@@ -133,6 +141,10 @@ func parseCommandLine() (subCommand string, config interface{}) {
parser.AddCommand(broadcastSubCmd, "Broadcast the given transaction",
"Broadcast the given transaction", broadcastConf)
parseConf := &parseConfig{}
parser.AddCommand(parseSubCmd, "Parse the given transaction and print its contents",
"Parse the given transaction and print its contents", parseConf)
showAddressesConf := &showAddressesConfig{DaemonAddress: defaultListen}
parser.AddCommand(showAddressesSubCmd, "Shows all generated public addresses of the current wallet",
"Shows all generated public addresses of the current wallet", showAddressesConf)
@@ -207,6 +219,13 @@ func parseCommandLine() (subCommand string, config interface{}) {
printErrorAndExit(err)
}
config = broadcastConf
case parseSubCmd:
combineNetworkFlags(&parseConf.NetworkFlags, &cfg.NetworkFlags)
err := parseConf.ResolveNetwork(parser)
if err != nil {
printErrorAndExit(err)
}
config = parseConf
case showAddressesSubCmd:
combineNetworkFlags(&showAddressesConf.NetworkFlags, &cfg.NetworkFlags)
err := showAddressesConf.ResolveNetwork(parser)

View File

@@ -19,6 +19,8 @@ func main() {
err = sign(config.(*signConfig))
case broadcastSubCmd:
err = broadcast(config.(*broadcastConfig))
case parseSubCmd:
err = parse(config.(*parseConfig))
case showAddressesSubCmd:
err = showAddresses(config.(*showAddressesConfig))
case newAddressSubCmd:

cmd/kaspawallet/parse.go Normal file
View File

@@ -0,0 +1,83 @@
package main
import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/pkg/errors"
"io/ioutil"
"strings"
)
func parse(conf *parseConfig) error {
if conf.Transaction == "" && conf.TransactionFile == "" {
return errors.Errorf("Either --transaction or --transaction-file is required")
}
if conf.Transaction != "" && conf.TransactionFile != "" {
return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time")
}
transactionHex := conf.Transaction
if conf.TransactionFile != "" {
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
if err != nil {
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
}
transactionHex = strings.TrimSpace(string(transactionHexBytes))
}
transaction, err := hex.DecodeString(transactionHex)
if err != nil {
return err
}
partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction)
if err != nil {
return err
}
fmt.Printf("Transaction ID: \t%s\n", consensushashing.TransactionID(partiallySignedTransaction.Tx))
fmt.Println()
allInputSompi := uint64(0)
for index, input := range partiallySignedTransaction.Tx.Inputs {
partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index]
if conf.Verbose {
fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Kaspa\n", index, input.PreviousOutpoint.TransactionID,
input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerKaspa))
}
allInputSompi += partiallySignedInput.PrevOutput.Value
}
if conf.Verbose {
fmt.Println()
}
allOutputSompi := uint64(0)
for index, output := range partiallySignedTransaction.Tx.Outputs {
scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams)
if err != nil {
return err
}
addressString := scriptPublicKeyAddress.EncodeAddress()
if scriptPublicKeyType == txscript.NonStandardTy {
scriptPublicKeyHex := hex.EncodeToString(output.ScriptPublicKey.Script)
addressString = fmt.Sprintf("<Non-standard transaction script public key: %s>", scriptPublicKeyHex)
}
fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Kaspa\n",
index, addressString, float64(output.Value)/float64(constants.SompiPerKaspa))
allOutputSompi += output.Value
}
fmt.Println()
fmt.Printf("Fee:\t%d Sompi\n", allInputSompi-allOutputSompi)
return nil
}
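A typical use of the new sub-command, assuming a hex-encoded partially signed transaction was previously written to tx.hex (for example by the create-unsigned-transaction and sign steps) and mainnet defaults:
kaspawallet parse --transaction-file tx.hex --verbose
This prints the transaction ID, one line per input (inputs are only listed with --verbose), one line per output with the recipient address and amount in Kaspa, and finally the fee in Sompi, using the formats shown above.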

View File

@@ -286,13 +286,15 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
blockInfo.BlueScore = ghostdagData.BlueScore()
blockInfo.BlueWork = ghostdagData.BlueWork()
blockInfo.SelectedParent = ghostdagData.SelectedParent()
blockInfo.MergeSetBlues = ghostdagData.MergeSetBlues()
blockInfo.MergeSetReds = ghostdagData.MergeSetReds()
return blockInfo, nil
}
func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
parents []*externalapi.DomainHash, selectedParent *externalapi.DomainHash,
children []*externalapi.DomainHash, err error) {
parents []*externalapi.DomainHash, children []*externalapi.DomainHash, err error) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -301,15 +303,10 @@ func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
blockRelation, err := s.blockRelationStores[0].BlockRelation(s.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
blockGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, false)
if err != nil {
return nil, nil, nil, err
}
return blockRelation.Parents, blockGHOSTDAGData.SelectedParent(), blockRelation.Children, nil
return blockRelation.Parents, blockRelation.Children, nil
}
func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
@@ -346,6 +343,25 @@ func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash,
return s.syncManager.GetHashesBetween(stagingArea, lowHash, highHash, maxBlocks)
}
func (s *consensus) GetAnticone(blockHash, contextHash *externalapi.DomainHash,
maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) {
s.lock.Lock()
defer s.lock.Unlock()
stagingArea := model.NewStagingArea()
err = s.validateBlockHashExists(stagingArea, blockHash)
if err != nil {
return nil, err
}
err = s.validateBlockHashExists(stagingArea, contextHash)
if err != nil {
return nil, err
}
return s.syncManager.GetAnticone(stagingArea, blockHash, contextHash, maxBlocks)
}
func (s *consensus) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -704,7 +720,7 @@ func (s *consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.
return nil, err
}
return s.dagTraversalManager.AnticoneFromBlocks(stagingArea, tips, blockHash)
return s.dagTraversalManager.AnticoneFromBlocks(stagingArea, tips, blockHash, 0)
}
func (s *consensus) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
@@ -824,3 +840,16 @@ func (s *consensus) TrustedGHOSTDAGData(blockHash *externalapi.DomainHash) (*ext
return ghostdagData, nil
}
func (s *consensus) IsChainBlock(blockHash *externalapi.DomainHash) (bool, error) {
s.lock.Lock()
defer s.lock.Unlock()
stagingArea := model.NewStagingArea()
virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false)
if err != nil {
return false, err
}
return s.dagTopologyManagers[0].IsInSelectedParentChainOf(stagingArea, blockHash, virtualGHOSTDAGData.SelectedParent())
}

View File

@@ -0,0 +1,44 @@
package blockwindowheapslicestore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
type shardKey struct {
hash externalapi.DomainHash
windowSize int
}
type blockWindowHeapSliceStagingShard struct {
store *blockWindowHeapSliceStore
toAdd map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair
}
func (bss *blockWindowHeapSliceStore) stagingShard(stagingArea *model.StagingArea) *blockWindowHeapSliceStagingShard {
return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard {
return &blockWindowHeapSliceStagingShard{
store: bss,
toAdd: make(map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair),
}
}).(*blockWindowHeapSliceStagingShard)
}
func (bsss *blockWindowHeapSliceStagingShard) Commit(_ model.DBTransaction) error {
for key, heapSlice := range bsss.toAdd {
bsss.store.cache.Add(&key.hash, key.windowSize, heapSlice)
}
return nil
}
func (bsss *blockWindowHeapSliceStagingShard) isStaged() bool {
return len(bsss.toAdd) != 0
}
func newShardKey(hash *externalapi.DomainHash, windowSize int) shardKey {
return shardKey{
hash: *hash,
windowSize: windowSize,
}
}

View File

@@ -0,0 +1,47 @@
package blockwindowheapslicestore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
type blockWindowHeapSliceStore struct {
shardID model.StagingShardID
cache *lrucachehashandwindowsizetoblockghostdagdatahashpairs.LRUCache
}
// New instantiates a new WindowHeapSliceStore
func New(cacheSize int, preallocate bool) model.WindowHeapSliceStore {
return &blockWindowHeapSliceStore{
shardID: staging.GenerateShardingID(),
cache: lrucachehashandwindowsizetoblockghostdagdatahashpairs.New(cacheSize, preallocate),
}
}
// Stage stages the given heap slice for the given blockHash and windowSize
func (bss *blockWindowHeapSliceStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int, heapSlice []*externalapi.BlockGHOSTDAGDataHashPair) {
stagingShard := bss.stagingShard(stagingArea)
stagingShard.toAdd[newShardKey(blockHash, windowSize)] = heapSlice
}
func (bss *blockWindowHeapSliceStore) IsStaged(stagingArea *model.StagingArea) bool {
return bss.stagingShard(stagingArea).isStaged()
}
func (bss *blockWindowHeapSliceStore) Get(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) {
stagingShard := bss.stagingShard(stagingArea)
if heapSlice, ok := stagingShard.toAdd[newShardKey(blockHash, windowSize)]; ok {
return heapSlice, nil
}
if heapSlice, ok := bss.cache.Get(blockHash, windowSize); ok {
return heapSlice, nil
}
return nil, errors.Wrap(database.ErrNotFound, "Window heap slice not found")
}
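A minimal sketch of using the new store inside a staging area, assuming the model.WindowHeapSliceStore interface exposes the Stage/Get/IsStaged methods implemented above; the hash value and window size are arbitrary, and the empty heap slice stands in for a real DAA-window heap.
package main

import (
    "fmt"

    "github.com/kaspanet/kaspad/domain/consensus/datastructures/blockwindowheapslicestore"
    "github.com/kaspanet/kaspad/domain/consensus/model"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
    store := blockwindowheapslicestore.New(2000, false)
    stagingArea := model.NewStagingArea()

    blockHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x01})
    windowSize := 2641

    // Stage an (empty) heap slice for the block and window size.
    store.Stage(stagingArea, blockHash, windowSize, []*externalapi.BlockGHOSTDAGDataHashPair{})

    // Get finds the staged value even before anything is committed to the cache,
    // because it checks the staging shard first.
    heapSlice, err := store.Get(stagingArea, blockHash, windowSize)
    if err != nil {
        panic(err)
    }
    fmt.Println(len(heapSlice), store.IsStaged(stagingArea)) // 0 true
}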

View File

@@ -1,12 +1,12 @@
package consensus
import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockwindowheapslicestore"
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
"os"
"sync"
@@ -145,9 +145,10 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches)
headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches)
daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
windowHeapSliceStore := blockwindowheapslicestore.New(2000, preallocateCaches)
blockRelationStores, reachabilityDataStores, ghostdagDataStores := dagStores(config, prefixBucket, pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches, preallocateCaches)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, windowHeapSliceStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)
blockRelationStore := blockRelationStores[0]
reachabilityDataStore := reachabilityDataStores[0]
@@ -158,7 +159,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]
// Processes
parentsManager := parentssanager.New(config.GenesisHash)
parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,
@@ -168,6 +169,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
pruningStore,
config.GenesisHash,
config.MaxBlockLevel,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,
@@ -304,6 +306,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
config.TimestampDeviationTolerance,
config.TargetTimePerBlock,
config.IgnoreHeaderMass,
config.MaxBlockLevel,
dbManager,
difficultyManager,
@@ -370,6 +373,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
blockProcessor := blockprocessor.New(
genesisHash,
config.TargetTimePerBlock,
config.MaxBlockLevel,
dbManager,
consensusStateManager,
pruningManager,
@@ -417,6 +421,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
genesisHash,
config.K,
config.PruningProofM,
config.MaxBlockLevel,
)
c := &consensus{
@@ -568,16 +573,16 @@ func dagStores(config *Config,
pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int,
preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) {
blockRelationStores := make([]model.BlockRelationStore, constants.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, constants.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, constants.MaxBlockLevel+1)
blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1)
ghostdagDataCacheSize := pruningWindowSizeForCaches * 2
if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
}
for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
prefixBucket := prefixBucket.Bucket([]byte{byte(i)})
if i == 0 {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
@@ -597,6 +602,7 @@ func (f *factory) dagProcesses(config *Config,
dbManager model.DBManager,
blockHeaderStore model.BlockHeaderStore,
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
windowHeapSliceStore model.WindowHeapSliceStore,
blockRelationStores []model.BlockRelationStore,
reachabilityDataStores []model.ReachabilityDataStore,
ghostdagDataStores []model.GHOSTDAGDataStore) (
@@ -606,12 +612,12 @@ func (f *factory) dagProcesses(config *Config,
[]model.DAGTraversalManager,
) {
reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, constants.MaxBlockLevel+1)
reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1)
for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
reachabilityManagers[i] = reachabilitymanager.New(
dbManager,
ghostdagDataStores[i],
@@ -638,6 +644,7 @@ func (f *factory) dagProcesses(config *Config,
reachabilityDataStores[i],
ghostdagManagers[i],
daaWindowStore,
windowHeapSliceStore,
config.GenesisHash,
config.DifficultyAdjustmentWindowSize)
}

View File

@@ -5,3 +5,7 @@ import "github.com/pkg/errors"
// ErrBlockNotInSelectedParentChain is returned from CreateHeadersSelectedChainBlockLocator if one of the parameters
// passed to it is not in the headers selected parent chain
var ErrBlockNotInSelectedParentChain = errors.New("Block is not in selected parent chain")
// ErrReachedMaxTraversalAllowed is returned from AnticoneFromBlocks if `maxTraversalAllowed` was specified
// and the traversal passed it
var ErrReachedMaxTraversalAllowed = errors.New("Traversal searching for anticone passed the maxTraversalAllowed limit")

View File

@@ -69,7 +69,7 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
BlockLevel(maxBlockLevel int) int
Equal(other BaseBlockHeader) bool
}

View File

@@ -4,18 +4,24 @@ import "math/big"
// BlockInfo contains various information about a specific block
type BlockInfo struct {
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
SelectedParent *DomainHash
MergeSetBlues []*DomainHash
MergeSetReds []*DomainHash
}
// Clone returns a clone of BlockInfo
func (bi *BlockInfo) Clone() *BlockInfo {
return &BlockInfo{
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
SelectedParent: bi.SelectedParent,
MergeSetBlues: CloneHashes(bi.MergeSetBlues),
MergeSetReds: CloneHashes(bi.MergeSetReds),
}
}

View File

@@ -14,31 +14,83 @@ func initTestBlockInfoStructsForClone() []*BlockInfo {
BlockStatus(0x01),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x02),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
1,
1,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
255,
2,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
0,
3,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x01),
0,
big.NewInt(1),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
false,
BlockStatus(0x01),
0,
big.NewInt(1),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}),
},
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}),
},
},
}
return tests

View File

@@ -16,10 +16,11 @@ type Consensus interface {
GetBlockEvenIfHeaderOnly(blockHash *DomainHash) (*DomainBlock, error)
GetBlockHeader(blockHash *DomainHash) (BlockHeader, error)
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, selectedParent *DomainHash, children []*DomainHash, err error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, children []*DomainHash, err error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)
GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)
GetAnticone(blockHash, contextHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, err error)
GetMissingBlockBodyHashes(highHash *DomainHash) ([]*DomainHash, error)
GetPruningPointUTXOs(expectedPruningPointHash *DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
@@ -50,4 +51,5 @@ type Consensus interface {
TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *DomainHash, daaBlockWindowIndex uint64) (*TrustedDataDataDAAHeader, error)
TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *DomainHash) ([]*DomainHash, error)
TrustedGHOSTDAGData(blockHash *DomainHash) (*BlockGHOSTDAGData, error)
IsChainBlock(blockHash *DomainHash) (bool, error)
}

View File

@@ -0,0 +1,11 @@
package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// WindowHeapSliceStore caches the slices that are needed for the heap implementation of DAGTraversalManager.BlockWindow
type WindowHeapSliceStore interface {
Store
Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int, pairs []*externalapi.BlockGHOSTDAGDataHashPair)
IsStaged(stagingArea *StagingArea) bool
Get(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error)
}

View File

@@ -10,7 +10,7 @@ type DAGTraversalManager interface {
// from lowHash (exclusive) to highHash (inclusive) over highHash's selected parent chain
SelectedChildIterator(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash, includeLowHash bool) (BlockIterator, error)
SelectedChild(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash, maxTraversalAllowed uint64) ([]*externalapi.DomainHash, error)
AnticoneFromVirtualPOV(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
BlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.DomainHash, error)
DAABlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)

View File

@@ -6,6 +6,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type SyncManager interface {
GetHashesBetween(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, maxBlocks uint64) (
hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error)
GetAnticone(stagingArea *StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error)
GetMissingBlockBodyHashes(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
CreateBlockLocator(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, limit uint32) (
externalapi.BlockLocator, error)

View File

@@ -49,6 +49,7 @@ type TestConsensus interface {
*externalapi.VirtualChangeSet, error)
MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
ToJSON(w io.Writer) error
RenderDAGToDot(filename string) error

View File

@@ -16,7 +16,8 @@ type blockParentBuilder struct {
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
genesisHash *externalapi.DomainHash
genesisHash *externalapi.DomainHash
maxBlockLevel int
}
// New creates a new instance of a BlockParentBuilder
@@ -30,6 +31,7 @@ func New(
pruningStore model.PruningStore,
genesisHash *externalapi.DomainHash,
maxBlockLevel int,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
@@ -40,6 +42,7 @@ func New(
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
genesisHash: genesisHash,
maxBlockLevel: maxBlockLevel,
}
}
@@ -102,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
// all the block levels they occupy
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
blockLevel := directParentHeader.BlockLevel()
blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel)
for i := 0; i <= blockLevel; i++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)

View File

@@ -14,6 +14,7 @@ import (
type blockProcessor struct {
genesisHash *externalapi.DomainHash
targetTimePerBlock time.Duration
maxBlockLevel int
databaseContext model.DBManager
blockLogger *blocklogger.BlockLogger
@@ -52,6 +53,7 @@ type blockProcessor struct {
func New(
genesisHash *externalapi.DomainHash,
targetTimePerBlock time.Duration,
maxBlockLevel int,
databaseContext model.DBManager,
consensusStateManager model.ConsensusStateManager,
@@ -86,6 +88,7 @@ func New(
return &blockProcessor{
genesisHash: genesisHash,
targetTimePerBlock: targetTimePerBlock,
maxBlockLevel: maxBlockLevel,
databaseContext: databaseContext,
blockLogger: blocklogger.NewBlockLogger(),
pruningManager: pruningManager,

View File

@@ -259,7 +259,7 @@ func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.Stagi
return err
}
headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel()
headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel(bp.maxBlockLevel)
for blockLevel := 0; blockLevel <= headersSelectedTipHeaderBlockLevel; blockLevel++ {
err := bp.reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, headersSelectedTip)
if err != nil {

View File

@@ -62,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
return err
}
if !hasReachabilityData {
blockLevel := header.BlockLevel()
blockLevel := header.BlockLevel(v.maxBlockLevel)
for i := 0; i <= blockLevel; i++ {
err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
if err != nil {

View File

@@ -23,6 +23,7 @@ type blockValidator struct {
timestampDeviationTolerance int
targetTimePerBlock time.Duration
ignoreHeaderMass bool
maxBlockLevel int
databaseContext model.DBReader
difficultyManager model.DifficultyManager
@@ -60,6 +61,7 @@ func New(powMax *big.Int,
timestampDeviationTolerance int,
targetTimePerBlock time.Duration,
ignoreHeaderMass bool,
maxBlockLevel int,
databaseContext model.DBReader,
@@ -97,6 +99,7 @@ func New(powMax *big.Int,
mergeSetSizeLimit: mergeSetSizeLimit,
maxBlockParents: maxBlockParents,
ignoreHeaderMass: ignoreHeaderMass,
maxBlockLevel: maxBlockLevel,
timestampDeviationTolerance: timestampDeviationTolerance,
targetTimePerBlock: targetTimePerBlock,

View File

@@ -69,7 +69,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
header externalapi.BlockHeader,
isBlockWithTrustedData bool) error {
for level := 0; level <= header.BlockLevel(); level++ {
for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ {
var parents []*externalapi.DomainHash
for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
@@ -118,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
return err
}
blockLevel := header.BlockLevel()
blockLevel := header.BlockLevel(v.maxBlockLevel)
for i := 1; i <= blockLevel; i++ {
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
if err != nil {

View File

@@ -336,12 +336,12 @@ func (csm *consensusStateManager) boundedMergeBreakingParents(stagingArea *model
log.Debugf("Checking whether parent %s breaks the bounded merge set", parent)
isBadRedInPast := false
for _, badRedBlock := range badReds {
isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, parent, badRedBlock)
isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, badRedBlock, parent)
if err != nil {
return nil, err
}
if isBadRedInPast {
log.Debugf("Parent %s is an ancestor of bad red %s", parent, badRedBlock)
log.Debugf("Parent %s is a descendant of bad red %s", parent, badRedBlock)
break
}
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/pkg/errors"
)
func (dtm *dagTraversalManager) AnticoneFromVirtualPOV(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (
@@ -14,16 +15,18 @@ func (dtm *dagTraversalManager) AnticoneFromVirtualPOV(stagingArea *model.Stagin
return nil, err
}
return dtm.AnticoneFromBlocks(stagingArea, virtualParents, blockHash)
return dtm.AnticoneFromBlocks(stagingArea, virtualParents, blockHash, 0)
}
func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (
func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingArea, tips []*externalapi.DomainHash,
blockHash *externalapi.DomainHash, maxTraversalAllowed uint64) (
[]*externalapi.DomainHash, error) {
anticone := []*externalapi.DomainHash{}
queue := tips
visited := hashset.New()
traversalCounter := uint64(0)
for len(queue) > 0 {
var current *externalapi.DomainHash
current, queue = queue[0], queue[1:]
@@ -48,6 +51,14 @@ func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingAre
return nil, err
}
// We count the number of blocks in past(tips) \setminus past(blockHash).
// We don't use `len(visited)` since it includes some maximal blocks in past(blockHash) as well.
traversalCounter++
if maxTraversalAllowed > 0 && traversalCounter > maxTraversalAllowed {
return nil, errors.Wrapf(model.ErrReachedMaxTraversalAllowed,
"Passed max allowed traversal (%d > %d)", traversalCounter, maxTraversalAllowed)
}
if !blockIsAncestorOfCurrent {
anticone = append(anticone, current)
}
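A hedged usage sketch of the new bound (the helper name and the 10,000 budget are hypothetical; only the AnticoneFromBlocks signature and ErrReachedMaxTraversalAllowed come from this diff):

package example // illustrative sketch only

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/pkg/errors"
)

// boundedAnticone is a hypothetical caller that caps the traversal and recognizes the
// new sentinel error instead of letting an unbounded DAG walk run to completion.
func boundedAnticone(dtm model.DAGTraversalManager, stagingArea *model.StagingArea,
	tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {

	const maxTraversal = 10_000 // hypothetical budget; 0 means "no limit"
	anticone, err := dtm.AnticoneFromBlocks(stagingArea, tips, blockHash, maxTraversal)
	if errors.Is(err, model.ErrReachedMaxTraversalAllowed) {
		// The anticone is too large to enumerate within the budget; surface that distinctly.
		return nil, errors.Wrap(err, "anticone exceeds the traversal budget")
	}
	return anticone, err
}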

View File

@@ -152,6 +152,18 @@ func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, c
return &h
}
func (dtm *dagTraversalManager) newSizedUpHeapFromSlice(stagingArea *model.StagingArea, slice []*externalapi.BlockGHOSTDAGDataHashPair) *sizedUpBlockHeap {
sliceClone := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(slice), cap(slice))
copy(sliceClone, slice)
h := sizedUpBlockHeap{
impl: upHeap{baseHeap{slice: sliceClone, ghostdagManager: dtm.ghostdagManager}},
ghostdagStore: dtm.ghostdagDataStore,
dbContext: dtm.databaseContext,
stagingArea: stagingArea,
}
return &h
}
// len returns the length of this heap
func (sbh *sizedUpBlockHeap) len() int {
return sbh.impl.Len()

View File

@@ -18,6 +18,7 @@ type dagTraversalManager struct {
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore
genesisHash *externalapi.DomainHash
difficultyAdjustmentWindowSize int
windowHeapSliceStore model.WindowHeapSliceStore
}
// New instantiates a new DAGTraversalManager
@@ -28,6 +29,7 @@ func New(
reachabilityDataStore model.ReachabilityDataStore,
ghostdagManager model.GHOSTDAGManager,
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
windowHeapSliceStore model.WindowHeapSliceStore,
genesisHash *externalapi.DomainHash,
difficultyAdjustmentWindowSize int) model.DAGTraversalManager {
return &dagTraversalManager{
@@ -40,6 +42,7 @@ func New(
genesisHash: genesisHash,
difficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
windowHeapSliceStore: windowHeapSliceStore,
}
}

View File

@@ -16,7 +16,7 @@ func (dtm *dagTraversalManager) DAABlockWindow(stagingArea *model.StagingArea, h
func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, highHash *externalapi.DomainHash,
windowSize int) ([]*externalapi.DomainHash, error) {
windowHeap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
windowHeap, err := dtm.blockWindowHeap(stagingArea, highHash, windowSize)
if err != nil {
return nil, err
}
@@ -28,6 +28,28 @@ func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, high
return window, nil
}
func (dtm *dagTraversalManager) blockWindowHeap(stagingArea *model.StagingArea,
highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {
windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, highHash, windowSize)
sliceNotCached := database.IsNotFoundError(err)
if !sliceNotCached && err != nil {
return nil, err
}
if !sliceNotCached {
return dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice), nil
}
heap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
if err != nil {
return nil, err
}
if !highHash.Equal(model.VirtualBlockHash) {
dtm.windowHeapSliceStore.Stage(stagingArea, highHash, windowSize, heap.impl.slice)
}
return heap, nil
}
func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.StagingArea,
highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {
@@ -45,18 +67,54 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
return nil, err
}
// If the block has a trusted DAA window attached, we just take it as is and don't use the selected parent's cache to
// build the window. This is because tryPushMergeSet might not be able to find all the GHOSTDAG data that is
// associated with the block merge set.
_, err = dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
isNonTrustedBlock := database.IsNotFoundError(err)
if !isNonTrustedBlock && err != nil {
return nil, err
}
if isNonTrustedBlock && currentGHOSTDAGData.SelectedParent() != nil {
windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, currentGHOSTDAGData.SelectedParent(), windowSize)
selectedParentNotCached := database.IsNotFoundError(err)
if !selectedParentNotCached && err != nil {
return nil, err
}
if !selectedParentNotCached {
windowHeap := dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice)
if !currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
selectedParentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(
dtm.databaseContext, stagingArea, currentGHOSTDAGData.SelectedParent(), false)
if err != nil {
return nil, err
}
_, err = dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
if err != nil {
return nil, err
}
}
return windowHeap, nil
}
}
// Walk down the chain until you finish or find a trusted block, and then complete the rest
// of the window with the trusted window.
for {
if currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
break
}
_, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
isNotFoundError := database.IsNotFoundError(err)
if !isNotFoundError && err != nil {
currentIsNonTrustedBlock := database.IsNotFoundError(err)
if !currentIsNonTrustedBlock && err != nil {
return nil, err
}
if !isNotFoundError {
if !currentIsNonTrustedBlock {
for i := uint64(0); ; i++ {
daaBlock, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, i)
if database.IsNotFoundError(err) {
@@ -83,47 +141,60 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
if err != nil {
return nil, err
}
added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
done, err := dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
if err != nil {
return nil, err
}
// If the window is full and the selected parent is less than the minimum then we break
// because this means that there cannot be any more blocks in the past with higher blueWork
if !added {
if done {
break
}
// Now we go over the merge set.
// Remove the SP from the blue merge set because we already added it.
mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
// Go over the merge set in reverse because it's ordered in reverse by blueWork.
for i := len(mergeSetBlues) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetBlues[i])
if err != nil {
return nil, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
mergeSetReds := currentGHOSTDAGData.MergeSetReds()
for i := len(mergeSetReds) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetReds[i])
if err != nil {
return nil, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
current = currentGHOSTDAGData.SelectedParent()
currentGHOSTDAGData = selectedParentGHOSTDAGData
}
return windowHeap, nil
}
func (dtm *dagTraversalManager) tryPushMergeSet(windowHeap *sizedUpBlockHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData *externalapi.BlockGHOSTDAGData) (bool, error) {
added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
if err != nil {
return false, err
}
// If the window is full and the selected parent is less than the minimum then we break
// because this means that there cannot be any more blocks in the past with higher blueWork
if !added {
return true, nil
}
// Now we go over the merge set.
// Remove the SP from the blue merge set because we already added it.
mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
// Go over the merge set in reverse because it's ordered in reverse by blueWork.
for i := len(mergeSetBlues) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetBlues[i])
if err != nil {
return false, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
mergeSetReds := currentGHOSTDAGData.MergeSetReds()
for i := len(mergeSetReds) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetReds[i])
if err != nil {
return false, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
return false, nil
}

View File

@@ -3,17 +3,18 @@ package parentssanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
type parentsManager struct {
genesisHash *externalapi.DomainHash
genesisHash *externalapi.DomainHash
maxBlockLevel int
}
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
genesisHash: genesisHash,
maxBlockLevel: maxBlockLevel,
}
}
@@ -31,7 +32,7 @@ func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, le
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
numParents := constants.MaxBlockLevel + 1
numParents := pm.maxBlockLevel + 1
parents := make([]externalapi.BlockLevelParents, numParents)
for i := 0; i < numParents; i++ {
parents[i] = pm.ParentsAtLevel(blockHeader, i)

View File

@@ -38,8 +38,8 @@ func TestPruning(t *testing.T) {
"dag-for-test-pruning.json": {
dagconfig.MainnetParams.Name: "503",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "503",
dagconfig.SimnetParams.Name: "502",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "503",
},
}

View File

@@ -995,7 +995,13 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
return nil, err
}
if hasPruningPointInItsSelectedChain && pm.finalityScore(ghostdagData.BlueScore()) > pm.finalityScore(selectedParentPruningPointHeader.BlueScore()+pm.pruningDepth) {
// Note: the pruning point from the POV of the current block is the first block in its chain that is at depth pm.pruningDepth and
// whose finality score is greater than that of the previous pruning point. This is why, if the diff between (finalityScore(selectedParentPruningPoint.blueScore) + 1) * finalityInterval
// and the current block's blue score is less than pm.pruningDepth, we can know for sure that this block didn't trigger a pruning point change.
minRequiredBlueScoreForNextPruningPoint := (pm.finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * pm.finalityInterval
if hasPruningPointInItsSelectedChain &&
minRequiredBlueScoreForNextPruningPoint+pm.pruningDepth <= ghostdagData.BlueScore() {
var suggestedLowHash *externalapi.DomainHash
hasReachabilityData, err := pm.reachabilityDataStore.HasReachabilityData(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
if err != nil {
@@ -1003,7 +1009,15 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
}
if hasReachabilityData {
suggestedLowHash = selectedParentHeader.PruningPoint()
// nextPruningPointAndCandidateByBlockHash needs suggestedLowHash to be in the future of the pruning point because
// otherwise reachability selected chain data is unreliable.
isInFutureOfCurrentPruningPoint, err := pm.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, selectedParentHeader.PruningPoint())
if err != nil {
return nil, err
}
if isInFutureOfCurrentPruningPoint {
suggestedLowHash = selectedParentHeader.PruningPoint()
}
}
nextOrCurrentPruningPoint, _, err = pm.nextPruningPointAndCandidateByBlockHash(stagingArea, blockHash, suggestedLowHash)
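A worked numeric example of the cheap pre-check above (all numbers are hypothetical, and finalityScore is assumed to be integer division of blueScore by the finality interval):

package main

import "fmt"

func main() {
	const (
		finalityInterval = uint64(100)  // hypothetical
		pruningDepth     = uint64(1000) // hypothetical
	)
	finalityScore := func(blueScore uint64) uint64 { return blueScore / finalityInterval }

	selectedParentPruningPointBlueScore := uint64(950) // finality score 9
	minRequired := (finalityScore(selectedParentPruningPointBlueScore) + 1) * finalityInterval // 1000

	// A block with blueScore 1700 fails 1000+1000 <= 1700, so it cannot have moved the
	// pruning point and the expensive nextPruningPointAndCandidateByBlockHash call is skipped.
	fmt.Println(minRequired+pruningDepth <= uint64(1700)) // false
	// A block with blueScore 2100 passes the pre-check and proceeds to the full search.
	fmt.Println(minRequired+pruningDepth <= uint64(2100)) // true
}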

View File

@@ -13,7 +13,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/processes/reachabilitymanager"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
@@ -41,6 +40,7 @@ type pruningProofManager struct {
genesisHash *externalapi.DomainHash
k externalapi.KType
pruningProofM uint64
maxBlockLevel int
cachedPruningPoint *externalapi.DomainHash
cachedProof *externalapi.PruningPointProof
@@ -66,6 +66,7 @@ func New(
genesisHash *externalapi.DomainHash,
k externalapi.KType,
pruningProofM uint64,
maxBlockLevel int,
) model.PruningProofManager {
return &pruningProofManager{
@@ -86,6 +87,7 @@ func New(
genesisHash: genesisHash,
k: k,
pruningProofM: pruningProofM,
maxBlockLevel: maxBlockLevel,
}
}
@@ -134,7 +136,7 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
headersByLevel := make(map[int][]externalapi.BlockHeader)
selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
pruningPointLevel := pruningPointHeader.BlockLevel()
pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
var selectedTip *externalapi.DomainHash
if blockLevel <= pruningPointLevel {
@@ -310,7 +312,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
level0Headers := pruningPointProof.Headers[0]
pruningPointHeader := level0Headers[len(level0Headers)-1]
pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
pruningPointBlockLevel := pruningPointHeader.BlockLevel()
pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
if maxLevel >= len(pruningPointProof.Headers) {
return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
@@ -354,9 +356,9 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
}
blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -581,9 +583,9 @@ func (ppm *pruningProofManager) dagProcesses(
[]model.GHOSTDAGManager,
) {
reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
reachabilityManagers := make([]model.ReachabilityManager, ppm.maxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1)
for i := 0; i <= maxLevel; i++ {
reachabilityManagers[i] = reachabilitymanager.New(
@@ -627,9 +629,9 @@ func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *extern
stagingArea := model.NewStagingArea()
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
}
ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)

View File

@@ -145,7 +145,7 @@ func (sm *syncManager) missingBlockBodyHashes(stagingArea *model.StagingArea, hi
lowHash = selectedChild
}
if !foundHeaderOnlyBlock {
if lowHash == highHash {
if lowHash.Equal(highHash) {
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
// In these cases - return an empty list of blocks to sync
@@ -153,7 +153,7 @@ func (sm *syncManager) missingBlockBodyHashes(stagingArea *model.StagingArea, hi
}
// TODO: Once block children are fixed (https://github.com/kaspanet/kaspad/issues/1499),
// this error should be returned rather the logged
log.Errorf("no header-only blocks between %s and %s",
log.Errorf("No header-only blocks between %s and %s",
lowHash, highHash)
}
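The change above swaps pointer identity for value equality; a minimal sketch of why that matters (the hash bytes are arbitrary):

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Two distinct *DomainHash pointers that hold the same hash value.
	a := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
	b := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})

	fmt.Println(a == b)     // false: compares pointer identity, which is what the old code did
	fmt.Println(a.Equal(b)) // true: compares the underlying hash value, as the fix does
}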

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
type syncManager struct {
@@ -69,6 +70,21 @@ func (sm *syncManager) GetHashesBetween(stagingArea *model.StagingArea, lowHash,
return sm.antiPastHashesBetween(stagingArea, lowHash, highHash, maxBlocks)
}
func (sm *syncManager) GetAnticone(stagingArea *model.StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "GetAnticone")
defer onEnd()
isContextAncestorOfBlock, err := sm.dagTopologyManager.IsAncestorOf(stagingArea, contextHash, blockHash)
if err != nil {
return nil, err
}
if isContextAncestorOfBlock {
return nil, errors.Errorf("expected block %s to not be in future of %s",
blockHash,
contextHash)
}
return sm.dagTraversalManager.AnticoneFromBlocks(stagingArea, []*externalapi.DomainHash{contextHash}, blockHash, maxBlocks)
}
func (sm *syncManager) GetMissingBlockBodyHashes(stagingArea *model.StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "GetMissingBlockBodyHashes")
defer onEnd()

View File

@@ -2,6 +2,9 @@ package consensus
import (
"encoding/json"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"io"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -108,13 +111,13 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return consensushashing.BlockHash(block), virtualChangeSet, nil
}
func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
ID string `json:"id"`
Parents []string `json:"parents"`
}
// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
ID string `json:"id"`
Parents []string `json:"parents"`
}
func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
tipSet := map[externalapi.DomainHash]*externalapi.DomainHash{}
tipSet[*tc.dagParams.GenesisHash] = tc.dagParams.GenesisHash
@@ -182,6 +185,64 @@ func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockTy
return tips, nil
}
func (tc *testConsensus) ToJSON(w io.Writer) error {
hashToID := make(map[externalapi.DomainHash]string)
lastID := 0
encoder := json.NewEncoder(w)
visited := hashset.New()
queue := tc.dagTraversalManager.NewUpHeap(model.NewStagingArea())
err := queue.Push(tc.dagParams.GenesisHash)
if err != nil {
return err
}
blocksToAdd := make([]jsonBlock, 0)
for queue.Len() > 0 {
current := queue.Pop()
if visited.Contains(current) {
continue
}
visited.Add(current)
if current.Equal(model.VirtualBlockHash) {
continue
}
header, err := tc.blockHeaderStore.BlockHeader(tc.databaseContext, model.NewStagingArea(), current)
if err != nil {
return err
}
directParents := header.DirectParents()
parentIDs := make([]string, len(directParents))
for i, parent := range directParents {
parentIDs[i] = hashToID[*parent]
}
lastIDStr := fmt.Sprintf("%d", lastID)
blocksToAdd = append(blocksToAdd, jsonBlock{
ID: lastIDStr,
Parents: parentIDs,
})
hashToID[*current] = lastIDStr
lastID++
children, err := tc.dagTopologyManagers[0].Children(model.NewStagingArea(), current)
if err != nil {
return err
}
err = queue.PushSlice(children)
if err != nil {
return err
}
}
return encoder.Encode(blocksToAdd)
}
func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
// Require write lock because BuildBlockWithParents stages temporary data
tc.lock.Lock()
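For orientation, ToJSON emits one JSON array; a hypothetical three-block DAG (genesis with two children) would serialize roughly as follows (sibling order may differ, since IDs follow heap pop order):

[{"id":"0","parents":[]},{"id":"1","parents":["0"]},{"id":"2","parents":["0"]}]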

View File

@@ -179,9 +179,9 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
return bh.clone()
}
func (bh *blockHeader) BlockLevel() int {
func (bh *blockHeader) BlockLevel(maxBlockLevel int) int {
if !bh.isBlockLevelCached {
bh.blockLevel = pow.BlockLevel(bh)
bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel)
bh.isBlockLevelCached = true
}

View File

@@ -35,9 +35,4 @@ const (
// LockTimeThreshold is the number below which a lock time is
// interpreted to be a DAA score.
LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
// MaxBlockLevel is the maximum possible block level.
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
// This means that any block that has a level lower or equal to genesis will be level 0.
MaxBlockLevel = 225
)

View File

@@ -0,0 +1,79 @@
package lrucachehashandwindowsizetoblockghostdagdatahashpairs
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type lruKey struct {
blockHash externalapi.DomainHash
windowSize int
}
func newKey(blockHash *externalapi.DomainHash, windowSize int) lruKey {
return lruKey{
blockHash: *blockHash,
windowSize: windowSize,
}
}
// LRUCache is a least-recently-used cache from
// lruKey to *externalapi.BlockGHOSTDAGDataHashPair
type LRUCache struct {
cache map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair
capacity int
}
// New creates a new LRUCache
func New(capacity int, preallocate bool) *LRUCache {
var cache map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair
if preallocate {
cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair, capacity+1)
} else {
cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair)
}
return &LRUCache{
cache: cache,
capacity: capacity,
}
}
// Add adds an entry to the LRUCache
func (c *LRUCache) Add(blockHash *externalapi.DomainHash, windowSize int, value []*externalapi.BlockGHOSTDAGDataHashPair) {
key := newKey(blockHash, windowSize)
c.cache[key] = value
if len(c.cache) > c.capacity {
c.evictRandom()
}
}
// Get returns the entry for the given key, or (nil, false) otherwise
func (c *LRUCache) Get(blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, bool) {
key := newKey(blockHash, windowSize)
value, ok := c.cache[key]
if !ok {
return nil, false
}
return value, true
}
// Has returns whether the LRUCache contains the given key
func (c *LRUCache) Has(blockHash *externalapi.DomainHash, windowSize int) bool {
key := newKey(blockHash, windowSize)
_, ok := c.cache[key]
return ok
}
// Remove removes the entry for the given key. Does nothing if
// the entry does not exist
func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, windowSize int) {
key := newKey(blockHash, windowSize)
delete(c.cache, key)
}
func (c *LRUCache) evictRandom() {
var keyToEvict lruKey
for key := range c.cache {
keyToEvict = key
break
}
c.Remove(&keyToEvict.blockHash, keyToEvict.windowSize)
}
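A hedged usage sketch of the new cache (the import path, capacity, and window sizes are assumptions for illustration):

package example // illustrative sketch only

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs"
)

// cacheWindowSlice shows that entries are keyed by the (blockHash, windowSize) pair,
// so the same hash cached under a different window size is a separate entry.
func cacheWindowSlice(blockHash *externalapi.DomainHash, pairs []*externalapi.BlockGHOSTDAGDataHashPair) bool {
	cache := lrucachehashandwindowsizetoblockghostdagdatahashpairs.New(1000, false)
	cache.Add(blockHash, 2641, pairs)

	_, hitSameKey := cache.Get(blockHash, 2641)  // true: same (hash, windowSize) key
	_, hitOtherSize := cache.Get(blockHash, 101) // false: different windowSize, different key
	return hitSameKey && !hitOtherSize
}

Note that, despite the LRUCache name, eviction above removes an arbitrary map entry once capacity is exceeded rather than tracking recency.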

View File

@@ -3,7 +3,6 @@ package pow
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
"github.com/kaspanet/kaspad/util/difficulty"
@@ -96,15 +95,15 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
}
// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int {
// Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
// block level.
if len(header.DirectParents()) == 0 {
return constants.MaxBlockLevel
return maxBlockLevel
}
proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
level := maxBlockLevel - proofOfWorkValue.BitLen()
// If the block has a level lower than genesis make it zero.
if level < 0 {
level = 0
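A small illustration of the level rule above, using a hypothetical proof-of-work bit length instead of a real header (the numbers are examples only; mainnet's bound of 225 comes from the dagconfig change further down):

package main

import "fmt"

// blockLevelFromBitLen mirrors the arithmetic in pow.BlockLevel for illustration:
// the level is the max level minus the bit length of the proof-of-work value,
// clamped at zero for blocks weaker than the genesis level.
func blockLevelFromBitLen(powBitLen, maxBlockLevel int) int {
	level := maxBlockLevel - powBitLen
	if level < 0 {
		level = 0
	}
	return level
}

func main() {
	fmt.Println(blockLevelFromBitLen(200, 225)) // 25
	fmt.Println(blockLevelFromBitLen(240, 225)) // 0 (clamped)
}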

View File

@@ -185,6 +185,9 @@ type Params struct {
DisallowDirectBlocksOnTopOfGenesis bool
IgnoreHeaderMass bool
// MaxBlockLevel is the maximum possible block level.
MaxBlockLevel int
}
// NormalizeRPCServerAddress returns addr with the current network default
@@ -279,16 +282,20 @@ var MainnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
DisallowDirectBlocksOnTopOfGenesis: true,
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
// This means that any block that has a level lower or equal to genesis will be level 0.
MaxBlockLevel: 225,
}
// TestnetParams defines the network parameters for the test Kaspa network.
var TestnetParams = Params{
K: defaultGHOSTDAGK,
Name: "kaspa-testnet-8",
Name: "kaspa-testnet-9",
Net: appmessage.Testnet,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{"testnet-8-dnsseed.daglabs-dev.com"},
DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},
// DAG parameters
GenesisBlock: &testnetGenesisBlock,
@@ -339,6 +346,8 @@ var TestnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
IgnoreHeaderMass: true,
MaxBlockLevel: 250,
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -402,6 +411,8 @@ var SimnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
MaxBlockLevel: 250,
}
// DevnetParams defines the network parameters for the development Kaspa network.
@@ -462,6 +473,8 @@ var DevnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
IgnoreHeaderMass: true,
MaxBlockLevel: 250,
}
// ErrDuplicateNet describes an error where the parameters for a Kaspa

View File

@@ -135,7 +135,7 @@ func (btb *blockTemplateBuilder) GetBlockTemplate(coinbaseData *consensusexterna
invalidTxsErr := ruleerrors.ErrInvalidTransactionsInNewBlock{}
if errors.As(err, &invalidTxsErr) {
log.Criticalf("consensusReference.Consensus().BuildBlock returned invalid txs in GetBlockTemplate: %s", err)
log.Criticalf("consensusReference.Consensus().BuildBlock returned invalid txs in GetBlockTemplate")
invalidTxs := make([]*consensusexternalapi.DomainTransaction, 0, len(invalidTxsErr.InvalidTransactions))
for _, tx := range invalidTxsErr.InvalidTransactions {
invalidTxs = append(invalidTxs, tx.Transaction)

View File

@@ -18,7 +18,7 @@ func (tobf *TransactionsOrderedByFeeRate) GetByIndex(index int) *MempoolTransact
// Push inserts a transaction into the set, placing it in the correct place to preserve order
func (tobf *TransactionsOrderedByFeeRate) Push(transaction *MempoolTransaction) error {
index, err := tobf.findTransactionIndex(transaction)
index, _, err := tobf.findTransactionIndex(transaction)
if err != nil {
return err
}
@@ -29,18 +29,21 @@ func (tobf *TransactionsOrderedByFeeRate) Push(transaction *MempoolTransaction)
return nil
}
// ErrTransactionNotFound is returned by tobf.TransactionsOrderedByFeeRate when the given transaction is not found in the set
var ErrTransactionNotFound = errors.New("Couldn't find transaction in mp.orderedTransactionsByFeeRate")
// Remove removes the given transaction from the set.
// Returns an error if transaction does not exist in the set, or if the given transaction does not have mass
// and fee filled in.
func (tobf *TransactionsOrderedByFeeRate) Remove(transaction *MempoolTransaction) error {
index, err := tobf.findTransactionIndex(transaction)
index, wasFound, err := tobf.findTransactionIndex(transaction)
if err != nil {
return err
}
txID := transaction.TransactionID()
if !tobf.slice[index].TransactionID().Equal(txID) {
return errors.Errorf("Couldn't find %s in mp.orderedTransactionsByFeeRate", txID)
if !wasFound {
return errors.Wrapf(ErrTransactionNotFound,
"Couldn't find %s in mp.orderedTransactionsByFeeRate", transaction.TransactionID())
}
return tobf.RemoveAtIndex(index)
@@ -56,15 +59,18 @@ func (tobf *TransactionsOrderedByFeeRate) RemoveAtIndex(index int) error {
return nil
}
func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *MempoolTransaction) (int, error) {
// findTransactionIndex finds the given transaction inside the list of transactions ordered by fee rate.
// If the transaction was not found, it returns wasFound=false and index set to the position at which the transaction can be inserted
// while preserving the order.
func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *MempoolTransaction) (index int, wasFound bool, err error) {
if transaction.Transaction().Fee == 0 || transaction.Transaction().Mass == 0 {
return 0, errors.Errorf("findTxIndexInOrderedTransactionsByFeeRate expects a transaction with " +
return 0, false, errors.Errorf("findTransactionIndex expects a transaction with " +
"populated fee and mass")
}
txID := transaction.TransactionID()
txFeeRate := float64(transaction.Transaction().Fee) / float64(transaction.Transaction().Mass)
return sort.Search(len(tobf.slice), func(i int) bool {
index = sort.Search(len(tobf.slice), func(i int) bool {
iElement := tobf.slice[i]
elementFeeRate := float64(iElement.Transaction().Fee) / float64(iElement.Transaction().Mass)
if elementFeeRate > txFeeRate {
@@ -76,5 +82,10 @@ func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *Memp
}
return false
}), nil
})
wasFound = index != len(tobf.slice) && // sort.Search returns len(tobf.slice) if nothing was found
tobf.slice[index].TransactionID().Equal(transaction.TransactionID())
return index, wasFound, nil
}
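The (index, wasFound) convention above follows the standard sort.Search idiom; a minimal self-contained sketch on a plain sorted slice (not the mempool code itself):

package main

import (
	"fmt"
	"sort"
)

// findIndex returns where target sits, or where it could be inserted to keep the slice sorted.
func findIndex(sorted []int, target int) (index int, wasFound bool) {
	index = sort.Search(len(sorted), func(i int) bool { return sorted[i] >= target })
	// sort.Search returns len(sorted) when every element is smaller than target.
	wasFound = index != len(sorted) && sorted[index] == target
	return index, wasFound
}

func main() {
	s := []int{1, 3, 5, 9}
	fmt.Println(findIndex(s, 5)) // 2 true:  safe to remove the element at index 2
	fmt.Println(findIndex(s, 6)) // 3 false: 6 would be inserted at index 3
}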

View File

@@ -3,6 +3,8 @@ package mempool
import (
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool/model"
)
@@ -79,7 +81,12 @@ func (tp *transactionsPool) removeTransaction(transaction *model.MempoolTransact
err := tp.transactionsOrderedByFeeRate.Remove(transaction)
if err != nil {
return err
if errors.Is(err, model.ErrTransactionNotFound) {
log.Errorf("Transaction %s not found in tp.transactionsOrderedByFeeRate. This should never happen but sometimes does",
transaction.TransactionID())
} else {
return err
}
}
delete(tp.highPriorityTransactions, *transaction.TransactionID())
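A minimal sketch of the sentinel-error pattern adopted here, assuming a pkg/errors version whose Wrapf/Is cooperate with error unwrapping (the names below are stand-ins, not the mempool types):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // stand-in for model.ErrTransactionNotFound

func remove(id string) error {
	// The callee wraps the sentinel with context instead of returning a bare formatted error.
	return errors.Wrapf(errNotFound, "couldn't find %s in the ordered set", id)
}

func main() {
	err := remove("some-tx-id")
	if errors.Is(err, errNotFound) {
		fmt.Println("ignoring:", err) // downgraded to a log line, as removeTransaction now does
		return
	}
	fmt.Println("unexpected:", err)
}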

View File

@@ -52,7 +52,7 @@ const (
defaultSigCacheMaxSize = 100000
sampleConfigFilename = "sample-kaspad.conf"
defaultMaxUTXOCacheSize = 5000000000
defaultProtocolVersion = 4
defaultProtocolVersion = 5
)
var (

View File

@@ -47,6 +47,10 @@ message KaspadMessage {
ReadyMessage ready = 50;
BlockWithTrustedDataV4Message blockWithTrustedDataV4 = 51;
TrustedDataMessage trustedData = 52;
RequestIBDChainBlockLocatorMessage requestIBDChainBlockLocator = 53;
IbdChainBlockLocatorMessage ibdChainBlockLocator = 54;
RequestAnticoneMessage requestAnticone = 55;
RequestNextPruningPointAndItsAnticoneBlocksMessage requestNextPruningPointAndItsAnticoneBlocks = 56;
GetCurrentNetworkRequestMessage getCurrentNetworkRequest = 1001;
GetCurrentNetworkResponseMessage getCurrentNetworkResponse = 1002;

Some files were not shown because too many files have changed in this diff.