Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 19:22:53 +00:00

Compare commits: pp-anticon...patch2 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | ef1a3c0dce | |
| | 1cedc720ac | |
@@ -69,10 +69,6 @@ const (
 	CmdReady
 	CmdTrustedData
 	CmdBlockWithTrustedDataV4
-	CmdRequestNextPruningPointAndItsAnticoneBlocks
-	CmdRequestIBDChainBlockLocator
-	CmdIBDChainBlockLocator
-	CmdRequestAnticone

 	// rpc
 	CmdGetCurrentNetworkRequestMessage
@@ -199,10 +195,6 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdReady:                  "Ready",
 	CmdTrustedData:            "TrustedData",
 	CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
-	CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
-	CmdRequestIBDChainBlockLocator:                 "RequestIBDChainBlockLocator",
-	CmdIBDChainBlockLocator:                        "IBDChainBlockLocator",
-	CmdRequestAnticone:                             "RequestAnticone",
 }

 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -1,27 +0,0 @@
-package appmessage
-
-import (
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
-// locator message. It is used to find the blockLocator of a peer that is
-// syncing with you.
-type MsgIBDChainBlockLocator struct {
-	baseMessage
-	BlockLocatorHashes []*externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
-	return CmdIBDChainBlockLocator
-}
-
-// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
-// the Message interface. See MsgBlockLocator for details.
-func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
-	return &MsgIBDChainBlockLocator{
-		BlockLocatorHashes: locatorHashes,
-	}
-}
@@ -1,33 +0,0 @@
-// Copyright (c) 2013-2016 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package appmessage
-
-import (
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgRequestAnticone implements the Message interface and represents a kaspa
-// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
-type MsgRequestAnticone struct {
-	baseMessage
-	BlockHash   *externalapi.DomainHash
-	ContextHash *externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestAnticone) Command() MessageCommand {
-	return CmdRequestAnticone
-}
-
-// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
-// Message interface using the passed parameters and defaults for the remaining
-// fields.
-func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
-	return &MsgRequestAnticone{
-		BlockHash:   blockHash,
-		ContextHash: contextHash,
-	}
-}
@@ -1,31 +0,0 @@
-package appmessage
-
-import (
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
-// IBDRequestChainBlockLocator message. It is used to request a block locator between low
-// and high hash.
-// The locator is returned via a locator message (MsgIBDChainBlockLocator).
-type MsgRequestIBDChainBlockLocator struct {
-	baseMessage
-	HighHash *externalapi.DomainHash
-	LowHash  *externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
-	return CmdRequestIBDChainBlockLocator
-}
-
-// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
-// Message interface using the passed parameters and defaults for the remaining
-// fields.
-func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
-	return &MsgRequestIBDChainBlockLocator{
-		HighHash: highHash,
-		LowHash:  lowHash,
-	}
-}
@@ -1,22 +0,0 @@
-package appmessage
-
-// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
-// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
-// more blocks from the pruning anticone.
-//
-// This message has no payload.
-type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
-	baseMessage
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
-	return CmdRequestNextPruningPointAndItsAnticoneBlocks
-}
-
-// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
-// Message interface.
-func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
-	return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
-}
@@ -150,7 +150,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
 		return false
 	}
 	f.ibdPeer = ibdPeer
-	log.Infof("IBD started with peer %s", ibdPeer)
+	log.Infof("IBD started")

 	return true
 }
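For context, the single-IBD-session guard that this log line lives in is usually a mutex-protected check-and-set. Below is a minimal, self-contained sketch of that pattern; the names (Peer, flowContext, trySetIBDRunning) are illustrative stand-ins, not the repository's exact code:

```go
package main

import (
	"fmt"
	"sync"
)

// Peer stands in for kaspad's peer type; the real guard lives on FlowContext.
type Peer struct{ id string }

type flowContext struct {
	mu      sync.Mutex
	ibdPeer *Peer
}

// trySetIBDRunning returns true only for the first caller while no IBD session
// is active, so at most one peer can drive IBD at a time.
func (f *flowContext) trySetIBDRunning(peer *Peer) bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.ibdPeer != nil {
		return false // another peer already owns the IBD session
	}
	f.ibdPeer = peer
	return true
}

func main() {
	f := &flowContext{}
	fmt.Println(f.trySetIBDRunning(&Peer{id: "A"})) // true
	fmt.Println(f.trySetIBDRunning(&Peer{id: "B"})) // false: IBD already running
}
```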
@@ -2,7 +2,6 @@ package flowcontext

 import (
 	"errors"
-	"strings"
 	"sync/atomic"

 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -10,11 +9,6 @@ import (
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 )

-var (
-	// ErrPingTimeout signifies that a ping operation timed out.
-	ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
-)
-
 // HandleError handles an error from a flow,
 // It sends the error to errChan if isStopping == 0 and increments isStopping
 //
@@ -27,15 +21,8 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
 		if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
 			panic(err)
 		}
-		if errors.Is(err, ErrPingTimeout) {
-			// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
-			log.Errorf("error from %s: %s", flowName, err)
-		} else {
-			// Explain to the user that this is not a panic, but only a protocol error with a specific peer
-			logFrame := strings.Repeat("=", 52)
-			log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
-				flowName, logFrame, err, logFrame)
-		}
+		log.Errorf("error from %s: %s", flowName, err)
 	}

 	if atomic.AddUint32(isStopping, 1) == 1 {
@@ -20,7 +20,7 @@ var (
 	// connected peer may support.
 	minAcceptableProtocolVersion = uint32(4)

-	maxAcceptableProtocolVersion = uint32(5)
+	maxAcceptableProtocolVersion = uint32(4)
 )

 type receiveVersionFlow struct {
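The min/max pair bounds which p2p protocol versions the handshake will accept; this change drops support for v5, pinning the node to v4. A hedged sketch of the negotiation rule implied by these two variables (function name and exact semantics are illustrative, not the receiveversion flow's literal code):

```go
package main

import "fmt"

// negotiateProtocolVersion sketches the rule implied by the min/max pair above:
// reject peers below the minimum, clamp peers above the maximum.
func negotiateProtocolVersion(minAcceptable, maxAcceptable, peerVersion uint32) (uint32, bool) {
	if peerVersion < minAcceptable {
		return 0, false // peer is too old to talk to
	}
	if peerVersion > maxAcceptable {
		peerVersion = maxAcceptable // use the highest version we implement
	}
	return peerVersion, true
}

func main() {
	fmt.Println(negotiateProtocolVersion(4, 4, 5)) // 4 true: clamped down to v4
	fmt.Println(negotiateProtocolVersion(4, 4, 3)) // 0 false: below the minimum
}
```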
@@ -1,16 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"testing"
-)
-
-func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
-	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
-	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
-	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
-	if ibdBatchSize > router.DefaultMaxMessages {
-		t.Fatalf("IBD batch size (%d) must be smaller than or equal to router.DefaultMaxMessages (%d)",
-			ibdBatchSize, router.DefaultMaxMessages)
-	}
-}
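The invariant this deleted test enforces guards against a send-side stall: the syncer enqueues up to ibdBatchSize messages before waiting for the syncee's next request, so a full batch must fit in the receiving route's buffer. A minimal illustration of the failure mode with a bounded channel (capacities here are made up; router.DefaultMaxMessages is the real bound):

```go
package main

import "fmt"

func main() {
	const routeCapacity = 3 // stand-in for router.DefaultMaxMessages
	const batchSize = 3     // must be <= routeCapacity, like ibdBatchSize

	route := make(chan int, routeCapacity)
	// The "syncer" writes a full batch before the "syncee" reads anything,
	// mirroring how IBD pushes a batch and only then waits for an ack.
	for i := 0; i < batchSize; i++ {
		route <- i // would block forever here if batchSize > routeCapacity
	}
	fmt.Println("batch fit into the route buffer")
}
```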
@@ -33,7 +33,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
 			return err
 		}
 		if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-			return protocolerrors.Errorf(true, "block %s not found (v4)", hash)
+			return protocolerrors.Errorf(true, "block %s not found", hash)
 		}
 		block, err := context.Domain().Consensus().GetBlock(hash)
 		if err != nil {
@@ -10,7 +10,6 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
 	"github.com/kaspanet/kaspad/infrastructure/config"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 	"github.com/pkg/errors"
@@ -132,10 +131,6 @@ func (flow *handleRelayInvsFlow) start() error {
 		}

 		log.Debugf("Processing block %s", inv.Hash)
-		oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
-		if err != nil {
-			return err
-		}
 		missingParents, virtualChangeSet, err := flow.processBlock(block)
 		if err != nil {
 			if errors.Is(err, ruleerrors.ErrPrunedBlock) {
@@ -158,33 +153,11 @@ func (flow *handleRelayInvsFlow) start() error {
 			continue
 		}

-		oldVirtualParents := hashset.New()
-		for _, parent := range oldVirtualInfo.ParentHashes {
-			oldVirtualParents.Add(parent)
-		}
-
-		newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
+		log.Debugf("Relaying block %s", inv.Hash)
+		err = flow.relayBlock(block)
 		if err != nil {
 			return err
 		}
-
-		for _, parent := range newVirtualInfo.ParentHashes {
-			if oldVirtualParents.Contains(parent) {
-				continue
-			}
-
-			block, err := flow.Domain().Consensus().GetBlock(parent)
-			if err != nil {
-				return err
-			}
-			blockHash := consensushashing.BlockHash(block)
-			log.Debugf("Relaying block %s", blockHash)
-			err = flow.relayBlock(block)
-			if err != nil {
-				return err
-			}
-		}

 		log.Infof("Accepted block %s via relay", inv.Hash)
 		err = flow.OnNewBlock(block, virtualChangeSet)
 		if err != nil {
@@ -285,10 +258,7 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
 		if errors.As(err, missingParentsError) {
 			return missingParentsError.MissingParentHashes, nil, nil
 		}
-		// A duplicate block should not appear to the user as a warning and is already reported in the calling function
-		if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
-			log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
-		}
+		log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
 		return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
 	}
 	return nil, virtualChangeSet, nil
@@ -399,10 +369,6 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
 			"probably happened because it was randomly evicted immediately after it was added.", orphan)
 	}

-	if len(orphanRoots) == 0 {
-		// In some rare cases we get here when there are no orphan roots already
-		return nil
-	}
 	log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))

 	invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
@@ -10,9 +10,7 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )

-// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
-// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
-const ibdBatchSize = 100
+const ibdBatchSize = router.DefaultMaxMessages

 // RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
 type RequestHeadersContext interface {
@@ -13,9 +13,7 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/config"
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"github.com/kaspanet/kaspad/util/difficulty"
 	"github.com/pkg/errors"
-	"math/big"
 	"time"
 )

@@ -66,34 +64,6 @@ func (flow *handleIBDFlow) start() error {
 }

 func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
-	highHash := consensushashing.BlockHash(block)
-
-	// Temp code to avoid IBD from lagging nodes publishing their side-chain. This patch
-	// is applied only to p2p v4 since the implemented IBD negotiation has quadratic complexity in this worst-case.
-	// See IBD logic of p2p v5 for further details.
-	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
-	if err == nil {
-		virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
-		if err == nil {
-			// We first check that DAA score of the relay block is at distance of more than DAA window size.
-			// This indicates a side-chain which is not in the future of any block in the current virtual DAA window.
-			if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
-				// We then find the 'unit' of current virtual difficulty. We check if the relay block is at least
-				// at distance of 180 such units. This signals another condition for a pow-weak side-chain.
-				virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
-				var virtualSub, difficultyMul big.Int
-				if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
-					Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
-					log.Criticalf("Avoiding IBD triggered by relay %s with %d DAA score diff and lower blue work (%d, %d)",
-						highHash,
-						virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
-						virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
-					return nil
-				}
-			}
-		}
-	}
-
 	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
 	if !wasIBDNotRunning {
 		log.Debugf("IBD is already running")
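The removed guard combines two thresholds: the relay block must trail the virtual selected parent by more than a full DAA window (2641 blocks here) and by more than 180 "units" of current difficulty in accumulated blue work. A worked sketch of the blue-work comparison using math/big, with illustrative numbers in place of real header values:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Illustrative values; in the removed code virtualDifficulty comes from
	// difficulty.CalcWork(header.Bits()) and the blue works from the headers.
	virtualDifficulty := big.NewInt(1_000_000) // work of one block at current difficulty
	virtualBlueWork := big.NewInt(500_000_000)
	relayBlueWork := big.NewInt(100_000_000)

	threshold := new(big.Int).Mul(virtualDifficulty, big.NewInt(180))
	gap := new(big.Int).Sub(virtualBlueWork, relayBlueWork)

	// threshold < gap means the relay block is at least 180 difficulty-units
	// behind the virtual in accumulated blue work: a pow-weak side-chain.
	if threshold.Cmp(gap) < 0 {
		fmt.Println("skip IBD: relay block looks like a lagging side-chain")
	}
}
```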
@@ -106,14 +76,36 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
 		flow.logIBDFinished(isFinishedSuccessfully)
 	}()

-	log.Infof("IBD started with peer %s and highHash %s", flow.peer, highHash)
-	log.Infof("Syncing blocks up to %s", highHash)
-	log.Infof("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
+	highHash := consensushashing.BlockHash(block)
+	log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
+	log.Criticalf("Syncing blocks up to %s", highHash)
+	log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
 	highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
 	if err != nil {
 		return err
 	}
-	log.Infof("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
+	log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
+	checkpoint, err := externalapi.NewDomainHashFromString("05ff0f2e1d201dcaee7c5e567cc2c1d42ca3cce9fefbd3b519dc68b5bb89d0b9")
+	if err != nil {
+		return err
+	}
+
+	info, err := flow.Domain().Consensus().GetBlockInfo(checkpoint)
+	if err != nil {
+		return err
+	}
+
+	if info.Exists {
+		isInSelectedParentChainOf, err := flow.Domain().Consensus().IsInSelectedParentChainOf(checkpoint, highestSharedBlockHash)
+		if err != nil {
+			return err
+		}
+
+		if !isInSelectedParentChainOf {
+			log.Criticalf("Stopped IBD because the checkpoint %s is not in the selected chain of %s", checkpoint, highestSharedBlockHash)
+			return nil
+		}
+	}

 	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
 	if err != nil {
@@ -337,6 +329,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
 		}
 	})

+	count := 0
 	for {
 		select {
 		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
@@ -353,7 +346,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
 				return nil
 			}
 			for _, header := range ibdBlocksMessage.BlockHeaders {
-				err = flow.processHeader(consensus, header)
+				_, err := flow.processHeader(consensus, header)
 				if err != nil {
 					return err
 				}
@@ -394,7 +387,7 @@ func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeader
 	}
 }

-func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
+func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
 	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
 	block := &externalapi.DomainBlock{
 		Header: header,
@@ -404,27 +397,26 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
 	blockHash := consensushashing.BlockHash(block)
 	blockInfo, err := consensus.GetBlockInfo(blockHash)
 	if err != nil {
-		return err
+		return false, err
 	}
 	if blockInfo.Exists {
 		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
-		return nil
+		return false, nil
 	}
 	_, err = consensus.ValidateAndInsertBlock(block, false)
 	if err != nil {
 		if !errors.As(err, &ruleerrors.RuleError{}) {
-			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
+			return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
 		}

 		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
 			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
 		} else {
 			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
-			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
+			return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
 		}
 	}
-
-	return nil
+	return true, nil
 }

 func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
@@ -10,10 +10,6 @@ type ibdProgressReporter struct {
 }

 func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
-	if highDAAScore <= lowDAAScore {
-		// Avoid a zero or negative diff
-		highDAAScore = lowDAAScore + 1
-	}
 	return &ibdProgressReporter{
 		lowDAAScore:  lowDAAScore,
 		highDAAScore: highDAAScore,
@@ -27,16 +23,7 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
 func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
 	ipr.processed += processedDelta

-	// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
-	if highestProcessedDAAScore > ipr.highDAAScore {
-		ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
-		ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
-	}
-	relativeDAAScore := uint64(0)
-	if highestProcessedDAAScore > ipr.lowDAAScore {
-		// Avoid a negative diff
-		relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
-	}
+	relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
 	progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
 	if progressPercent > ipr.lastReportedProgressPercent {
 		log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
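With the clamping branches removed, the percentage is a straight ratio of DAA-score progress. A worked numeric example of the computation shown above (all state values are illustrative):

```go
package main

import "fmt"

func main() {
	// Illustrative reporter state: scores are DAA scores, not block counts.
	const lowDAAScore, highDAAScore = 1000, 6000
	const totalDAAScoreDifference = highDAAScore - lowDAAScore // 5000

	highestProcessedDAAScore := uint64(3500)
	relativeDAAScore := highestProcessedDAAScore - lowDAAScore // 2500
	progressPercent := int((float64(relativeDAAScore) / float64(totalDAAScoreDifference)) * 100)
	fmt.Println(progressPercent) // 50
}
```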
@@ -24,7 +24,6 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
 		return err
 	}

-	log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
 	deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
 	if deleteStagingConsensusErr != nil {
 		return deleteStagingConsensusErr
@@ -33,8 +32,6 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
 		return err
 	}

-	log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
-		"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
 	err = flow.Domain().CommitStagingConsensus()
 	if err != nil {
 		return err
@@ -347,7 +344,6 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
 	log.Info("Fetching the pruning point UTXO set")
 	isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
 	if err != nil {
-		log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
 		return false, err
 	}

@@ -2,8 +2,6 @@ package ping

 import (
 	"github.com/kaspanet/kaspad/app/protocol/common"
-	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
-	"github.com/pkg/errors"
 	"time"

 	"github.com/kaspanet/kaspad/app/appmessage"
@@ -63,9 +61,6 @@ func (flow *sendPingsFlow) start() error {

 		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
 		if err != nil {
-			if errors.Is(err, router.ErrTimeout) {
-				return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
-			}
 			return err
 		}
 		pongMessage := message.(*appmessage.MsgPong)
@@ -1,39 +0,0 @@
-package addressexchange
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/common"
-	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-)
-
-// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
-type ReceiveAddressesContext interface {
-	AddressManager() *addressmanager.AddressManager
-}
-
-// ReceiveAddresses asks a peer for more addresses if needed.
-func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
-	peer *peerpkg.Peer) error {
-
-	subnetworkID := peer.SubnetworkID()
-	msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
-	err := outgoingRoute.Enqueue(msgGetAddresses)
-	if err != nil {
-		return err
-	}
-
-	message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
-	if err != nil {
-		return err
-	}
-
-	msgAddresses := message.(*appmessage.MsgAddresses)
-	if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
-		return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
-	}
-
-	return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
-}
@@ -1,52 +0,0 @@
-package addressexchange
-
-import (
-	"math/rand"
-
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-)
-
-// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
-type SendAddressesContext interface {
-	AddressManager() *addressmanager.AddressManager
-}
-
-// SendAddresses sends addresses to a peer that requests it.
-func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
-	for {
-		_, err := incomingRoute.Dequeue()
-		if err != nil {
-			return err
-		}
-
-		addresses := context.AddressManager().Addresses()
-		msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))
-
-		err = outgoingRoute.Enqueue(msgAddresses)
-		if err != nil {
-			return err
-		}
-	}
-}
-
-// shuffleAddresses randomizes the given addresses sent if there are more than the maximum allowed in one message.
-func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
-	addressCount := len(addresses)
-
-	if addressCount < appmessage.MaxAddressesPerMsg {
-		return addresses
-	}
-
-	shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
-	copy(shuffleAddresses, addresses)
-
-	rand.Shuffle(addressCount, func(i, j int) {
-		shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
-	})
-
-	// Truncate it to the maximum size.
-	shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
-	return shuffleAddresses
-}
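The deleted helper relies on rand.Shuffle, which performs an in-place Fisher–Yates permutation through the provided swap callback. A compact, self-contained usage reminder mirroring the copy-then-truncate approach (maxPerMsg is an illustrative stand-in for appmessage.MaxAddressesPerMsg):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const maxPerMsg = 3 // stand-in for appmessage.MaxAddressesPerMsg
	addrs := []string{"a", "b", "c", "d", "e"}

	// Shuffle a copy so callers keep their original ordering, then truncate,
	// giving every address an equal chance of landing in the sampled prefix.
	shuffled := make([]string, len(addrs))
	copy(shuffled, addrs)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	fmt.Println(shuffled[:maxPerMsg])
}
```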
@@ -1,16 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"testing"
-)
-
-func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
-	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
-	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
-	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
-	if ibdBatchSize >= router.DefaultMaxMessages {
-		t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
-			ibdBatchSize, router.DefaultMaxMessages)
-	}
-}
@@ -1,33 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/common"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
-	msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
-	return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
-}
-
-func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
-	for {
-		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
-		if err != nil {
-			return nil, err
-		}
-
-		switch message := message.(type) {
-		case *appmessage.MsgInvRelayBlock:
-			flow.invsQueue = append(flow.invsQueue, message)
-		case *appmessage.MsgBlockLocator:
-			return message.BlockLocatorHashes, nil
-		default:
-			return nil,
-				protocolerrors.Errorf(true, "received unexpected message type. "+
-					"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
-		}
-	}
-}
@@ -1,86 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/peer"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-)
-
-// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
-type HandleIBDBlockLocatorContext interface {
-	Domain() domain.Domain
-}
-
-// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
-// the highest known block that's in the selected parent chain of `targetHash` to the
-// requesting peer.
-func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route, peer *peer.Peer) error {
-
-	for {
-		message, err := incomingRoute.Dequeue()
-		if err != nil {
-			return err
-		}
-		ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)
-
-		targetHash := ibdBlockLocatorMessage.TargetHash
-		log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)
-
-		blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
-		if err != nil {
-			return err
-		}
-		if !blockInfo.Exists {
-			return protocolerrors.Errorf(true, "received IBDBlockLocator "+
-				"with an unknown targetHash %s", targetHash)
-		}
-
-		foundHighestHashInTheSelectedParentChainOfTargetHash := false
-		for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
-			blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
-			if err != nil {
-				return err
-			}
-
-			// The IBD block locator is checking only existing blocks with bodies.
-			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-				continue
-			}
-
-			isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
-				context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
-			if err != nil {
-				return err
-			}
-			if !isBlockLocatorHashInSelectedParentChainOfHighHash {
-				continue
-			}
-
-			foundHighestHashInTheSelectedParentChainOfTargetHash = true
-			log.Debugf("Found a known hash %s amongst peer %s's "+
-				"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)
-
-			ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
-			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
-			if err != nil {
-				return err
-			}
-			break
-		}
-
-		if !foundHighestHashInTheSelectedParentChainOfTargetHash {
-			log.Warnf("no hash was found in the blockLocator "+
-				"that was in the selected parent chain of targetHash %s", targetHash)
-
-			ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
-			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
-			if err != nil {
-				return err
-			}
-		}
-	}
-}
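For reference, the block-locator scheme this handler consumes lists chain hashes with exponentially growing gaps, so two peers can find their highest shared chain block with O(log n) hashes. A generic sketch of generating such locator indexes (this is the general technique, not kaspad's own locator-building code):

```go
package main

import "fmt"

// locatorIndexes returns chain indexes walking back from the tip with doubling
// steps: tip, tip-1, tip-2, tip-4, tip-8, ... down to genesis (index 0).
func locatorIndexes(tip int) []int {
	indexes := []int{}
	step := 1
	for i := tip; i > 0; i -= step {
		indexes = append(indexes, i)
		if len(indexes) > 1 { // start doubling after the first backward step
			step *= 2
		}
	}
	return append(indexes, 0)
}

func main() {
	fmt.Println(locatorIndexes(100)) // [100 99 97 93 85 69 37 0]
}
```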
@@ -1,54 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"github.com/pkg/errors"
-)
-
-// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
-type HandleIBDBlockRequestsContext interface {
-	Domain() domain.Domain
-}
-
-// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
-// their corresponding blocks to the requesting peer.
-func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route) error {
-
-	for {
-		message, err := incomingRoute.Dequeue()
-		if err != nil {
-			return err
-		}
-		msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
-		log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
-		for i, hash := range msgRequestIBDBlocks.Hashes {
-			// Fetch the block from the database.
-			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
-			if err != nil {
-				return err
-			}
-			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-				return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
-			}
-			block, err := context.Domain().Consensus().GetBlock(hash)
-			if err != nil {
-				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
-			}
-
-			// TODO (Partial nodes): Convert block to partial block if needed
-
-			blockMessage := appmessage.DomainBlockToMsgBlock(block)
-			ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
-			err = outgoingRoute.Enqueue(ibdBlockMessage)
-			if err != nil {
-				return err
-			}
-			log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
-		}
-	}
-}
@@ -1,85 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"github.com/pkg/errors"
-)
-
-// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
-type RequestIBDChainBlockLocatorContext interface {
-	Domain() domain.Domain
-}
-
-type handleRequestIBDChainBlockLocatorFlow struct {
-	RequestIBDChainBlockLocatorContext
-	incomingRoute, outgoingRoute *router.Route
-}
-
-// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
-func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route) error {
-
-	flow := &handleRequestIBDChainBlockLocatorFlow{
-		RequestIBDChainBlockLocatorContext: context,
-		incomingRoute:                      incomingRoute,
-		outgoingRoute:                      outgoingRoute,
-	}
-	return flow.start()
-}
-
-func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
-	for {
-		highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
-		if err != nil {
-			return err
-		}
-		log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
-
-		var locator externalapi.BlockLocator
-		if highHash == nil || lowHash == nil {
-			locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
-		} else {
-			locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
-			if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
-				// The chain has been modified, signal it by sending an empty locator
-				locator, err = externalapi.BlockLocator{}, nil
-			}
-		}
-
-		if err != nil {
-			log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
-			return protocolerrors.Errorf(true, "couldn't build a block "+
-				"locator between %s and %s", lowHash, highHash)
-		}
-
-		err = flow.sendIBDChainBlockLocator(locator)
-		if err != nil {
-			return err
-		}
-	}
-}
-
-func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
-
-	message, err := flow.incomingRoute.Dequeue()
-	if err != nil {
-		return nil, nil, err
-	}
-	msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
-
-	return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
-}
-
-func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
-	msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
-	err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
-	if err != nil {
-		return err
-	}
-	return nil
-}
@@ -1,160 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/config"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"sync/atomic"
-)
-
-// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
-type PruningPointAndItsAnticoneRequestsContext interface {
-	Domain() domain.Domain
-	Config() *config.Config
-}
-
-var isBusy uint32
-
-// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
-// the pruning point and its anticone to the requesting peer.
-func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route, peer *peerpkg.Peer) error {
-
-	for {
-		err := func() error {
-			_, err := incomingRoute.Dequeue()
-			if err != nil {
-				return err
-			}
-
-			if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
-				return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
-			}
-			defer atomic.StoreUint32(&isBusy, 0)
-
-			log.Debugf("Got request for pruning point and its anticone from %s", peer)
-
-			pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
-			if err != nil {
-				return err
-			}
-
-			log.Criticalf("Pruning point anticone size is %d", len(pruningPointHeaders))
-
-			msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
-			for i, header := range pruningPointHeaders {
-				msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
-			}
-
-			err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
-			if err != nil {
-				return err
-			}
-
-			pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
-			if err != nil {
-				return err
-			}
-
-			windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
-			daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
-			daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
-			trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
-
-			ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
-			ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
-			trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
-			for _, blockHash := range pointAndItsAnticone {
-				blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
-				if err != nil {
-					return err
-				}
-
-				trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
-				for i, daaBlockHash := range blockDAAWindowHashes {
-					index, exists := daaWindowHashesToIndex[*daaBlockHash]
-					if !exists {
-						trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
-						if err != nil {
-							return err
-						}
-						daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
-						index = len(daaWindowBlocks) - 1
-						daaWindowHashesToIndex[*daaBlockHash] = index
-					}
-
-					trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
-				}
-
-				ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
-				if err != nil {
-					return err
-				}
-
-				trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
-				for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
-					index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
-					if !exists {
-						data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
-						if err != nil {
-							return err
-						}
-						ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
-							Hash:         ghostdagDataBlockHash,
-							GHOSTDAGData: data,
-						})
-						index = len(ghostdagData) - 1
-						ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
-					}
-
-					trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
-				}
-			}
-
-			err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
-			if err != nil {
-				return err
-			}
-
-			for i, blockHash := range pointAndItsAnticone {
-				block, err := context.Domain().Consensus().GetBlock(blockHash)
-				if err != nil {
-					return err
-				}
-
-				err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
-				if err != nil {
-					return err
-				}
-
-				if (i+1)%ibdBatchSize == 0 {
-					// No timeout here, as we don't care if the syncee takes its time computing,
-					// since it only blocks this dedicated flow
-					message, err := incomingRoute.Dequeue()
-					if err != nil {
-						return err
-					}
-					if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
-						return protocolerrors.Errorf(true, "received unexpected message type. "+
-							"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
-					}
-				}
-			}
-
-			err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
-			if err != nil {
-				return err
-			}
-
-			log.Debugf("Sent pruning point and its anticone to %s", peer)
-			return nil
-		}()
-		if err != nil {
-			return err
-		}
-	}
-}
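The deleted handler dedupes shared DAA-window and GHOSTDAG entries by sending each one once and referencing it by index afterwards. The core interning pattern, reduced to its essentials in a self-contained sketch (string keys stand in for domain hashes):

```go
package main

import "fmt"

// intern returns the index of key in pool, appending it on first sight.
// The deleted flow applies exactly this idea to DAA-window headers and
// GHOSTDAG data so each shared entry crosses the wire once and is then
// referenced by index.
func intern(pool *[]string, index map[string]int, key string) int {
	if i, ok := index[key]; ok {
		return i
	}
	*pool = append(*pool, key)
	i := len(*pool) - 1
	index[key] = i
	return i
}

func main() {
	pool := []string{}
	index := map[string]int{}
	refs := []int{}
	for _, h := range []string{"a", "b", "a", "c", "b"} {
		refs = append(refs, intern(&pool, index, h))
	}
	fmt.Println(pool, refs) // [a b c] [0 1 0 2 1]
}
```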
@@ -1,40 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-)
-
-// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
-type PruningPointProofRequestsContext interface {
-	Domain() domain.Domain
-}
-
-// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
-// the pruning point proof to the requesting peer.
-func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route, peer *peerpkg.Peer) error {
-
-	for {
-		_, err := incomingRoute.Dequeue()
-		if err != nil {
-			return err
-		}
-
-		log.Debugf("Got request for pruning point proof from %s", peer)
-
-		pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
-		if err != nil {
-			return err
-		}
-		pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
-		err = outgoingRoute.Enqueue(pruningPointProofMessage)
-		if err != nil {
-			return err
-		}
-
-		log.Debugf("Sent pruning point proof to %s", peer)
-	}
-}
@@ -1,53 +0,0 @@
-package blockrelay
-
-import (
-	"github.com/kaspanet/kaspad/app/appmessage"
-	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
-	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
-	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-	"github.com/pkg/errors"
-)
-
-// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
-type RelayBlockRequestsContext interface {
-	Domain() domain.Domain
-}
-
-// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
-// their corresponding blocks to the requesting peer.
-func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
-	outgoingRoute *router.Route, peer *peerpkg.Peer) error {
-
-	for {
-		message, err := incomingRoute.Dequeue()
-		if err != nil {
-			return err
-		}
-		getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
-		log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
-		for _, hash := range getRelayBlocksMessage.Hashes {
-			// Fetch the block from the database.
-			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
-			if err != nil {
-				return err
-			}
-			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-				return protocolerrors.Errorf(true, "block %s not found", hash)
-			}
-			block, err := context.Domain().Consensus().GetBlock(hash)
-			if err != nil {
-				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
-			}
-
-			// TODO (Partial nodes): Convert block to partial block if needed
-
-			err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
-			if err != nil {
-				return err
-			}
-			log.Debugf("Relayed block with hash %s", hash)
-		}
-	}
-}
@@ -1,416 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// orphanResolutionRange is the maximum amount of blockLocator hashes
|
||||
// to search for known blocks. See isBlockInOrphanResolutionRange for
|
||||
// further details
|
||||
var orphanResolutionRange uint32 = 5
|
||||
|
||||
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
|
||||
type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
||||
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
|
||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||
IsIBDRunning() bool
|
||||
IsRecoverableError(err error) bool
|
||||
}
|
||||
|
||||
type handleRelayInvsFlow struct {
|
||||
RelayInvsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
invsQueue []*appmessage.MsgInvRelayBlock
|
||||
}
|
||||
|
||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||
// are missing, adds them to the DAG and propagates them to the rest of the network.
|
||||
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||
peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &handleRelayInvsFlow{
|
||||
RelayInvsContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
||||
}
|
||||
err := flow.start()
|
||||
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||
close(peer.IBDRequestChannel())
|
||||
return err
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) start() error {
|
||||
for {
|
||||
log.Debugf("Waiting for inv")
|
||||
inv, err := flow.readInv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Got relay inv for block %s", inv.Hash)
|
||||
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
if blockInfo.BlockStatus == externalapi.StatusInvalid {
|
||||
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
|
||||
inv.Hash)
|
||||
}
|
||||
log.Debugf("Block %s already exists. continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.IsOrphan(inv.Hash) {
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
|
||||
err := flow.AddOrphanRootsToQueue(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Block relay is disabled during IBD
|
||||
if flow.IsIBDRunning() {
|
||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Requesting block %s", inv.Hash)
|
||||
block, exists, err := flow.requestBlock(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
err = flow.banIfBlockIsHeaderOnly(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
|
||||
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Processing block %s", inv.Hash)
|
||||
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
missingParents, virtualChangeSet, err := flow.processBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Infof("Ignoring duplicate block %s", inv.Hash)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(missingParents) > 0 {
|
||||
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
|
||||
err := flow.processOrphan(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
oldVirtualParents := hashset.New()
|
||||
for _, parent := range oldVirtualInfo.ParentHashes {
|
||||
oldVirtualParents.Add(parent)
|
||||
}
|
||||
|
||||
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, parent := range newVirtualInfo.ParentHashes {
|
||||
if oldVirtualParents.Contains(parent) {
|
||||
continue
|
||||
}
|
||||
|
||||
block, err := flow.Domain().Consensus().GetBlock(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("Relaying block %s", blockHash)
|
||||
err = flow.relayBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||
if len(block.Transactions) == 0 {
|
||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||
consensushashing.BlockHash(block))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
||||
if len(flow.invsQueue) > 0 {
|
||||
var inv *appmessage.MsgInvRelayBlock
|
||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
msg, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
"expecting an inv message", msg.Command())
|
||||
}
|
||||
return inv, nil
|
||||
}
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
	exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
	if exists {
		return nil, true, nil
	}

	// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
	// clean from any pending blocks.
	defer flow.SharedRequestedBlocks().Remove(requestHash)

	getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
	err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
	if err != nil {
		return nil, false, err
	}

	msgBlock, err := flow.readMsgBlock()
	if err != nil {
		return nil, false, err
	}

	block := appmessage.MsgBlockToDomainBlock(msgBlock)
	blockHash := consensushashing.BlockHash(block)
	if !blockHash.Equal(requestHash) {
		return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
	}

	return block, false, nil
}

// readMsgBlock returns the next MsgBlock from the incoming route, and populates invsQueue with any inv
// messages that arrive in the meantime.
//
// Note: this function assumes the route can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvRelayBlock:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgBlock:
			return message, nil
		default:
			return nil, errors.Errorf("unexpected message %s", message.Command())
		}
	}
}
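
// processBlock attempts to validate and insert the given block into consensus.
// It returns the hashes of any missing parents (in which case the block is an
// orphan) together with the resulting virtual change set.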
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
	blockHash := consensushashing.BlockHash(block)
	virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
		}

		missingParentsError := &ruleerrors.ErrMissingParents{}
		if errors.As(err, missingParentsError) {
			return missingParentsError.MissingParentHashes, nil, nil
		}
		// A duplicate block should not appear to the user as a warning, and it is already reported
		// in the calling function
		if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
		}
		return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
	}
	return nil, virtualChangeSet, nil
}

func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
	blockHash := consensushashing.BlockHash(block)
	return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}
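
// processOrphan handles a block whose parents are missing: if the block is
// within orphan resolution range it is added to the orphan set and its missing
// ancestors are requested; otherwise an IBD attempt is triggered against it.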
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
	blockHash := consensushashing.BlockHash(block)

	// Return if the block has already been orphaned from elsewhere
	if flow.IsOrphan(blockHash) {
		log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
		return nil
	}

	// Add the block to the orphan set if it's within orphan resolution range
	isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
	if err != nil {
		return err
	}
	if isBlockInOrphanResolutionRange {
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", blockHash)
				return nil
			}
		}

		log.Debugf("Block %s is within orphan resolution range. "+
			"Adding it to the orphan set", blockHash)
		flow.AddOrphan(block)
		log.Debugf("Requesting block %s missing ancestors", blockHash)
		return flow.AddOrphanRootsToQueue(blockHash)
	}

	// Start IBD unless we are already in IBD
	log.Debugf("Block %s is out of orphan resolution range. "+
		"Attempting to start IBD against it.", blockHash)

	// Send the block to the IBD flow via the IBDRequestChannel.
	// Note that this is a non-blocking send, since if IBD is already running there is no need to trigger it
	select {
	case flow.peer.IBDRequestChannel() <- block:
	default:
	}
	return nil
}
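
// isGenesisVirtualSelectedParent reports whether the virtual's selected parent
// is still the genesis block, i.e. the node has not synced past genesis yet.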
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
	if err != nil {
		return false, err
	}

	return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
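
// isChildOfGenesis reports whether the given block's only direct parent is the
// genesis block.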
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
	parents := block.Header.DirectParents()
	return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
}

// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
	err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
	if err != nil {
		return false, err
	}

	blockLocatorHashes, err := flow.receiveBlockLocator()
	if err != nil {
		return false, err
	}
	for _, blockLocatorHash := range blockLocatorHashes {
		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
		if err != nil {
			return false, err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			return true, nil
		}
	}
	return false, nil
}
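
// AddOrphanRootsToQueue fetches the roots of the given orphan's unknown past
// and prepends inv messages for them to the invs queue, so that they are
// requested before any other pending invs.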
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
	orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
	if err != nil {
		return err
	}

	if !orphanExists {
		log.Infof("Orphan block %s was missing from the orphan pool while requesting its roots. This "+
			"probably happened because it was randomly evicted immediately after it was added.", orphan)
	}

	if len(orphanRoots) == 0 {
		// In some rare cases we get here when there are already no orphan roots
		return nil
	}
	log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))

	invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
	for i, root := range orphanRoots {
		log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
		invMessages[i] = appmessage.NewMsgInvBlock(root)
	}

	flow.invsQueue = append(invMessages, flow.invsQueue...)
	return nil
}

@@ -1,95 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"sort"
)

// RequestAnticoneContext is the interface for the context needed for the HandleRequestAnticone flow.
type RequestAnticoneContext interface {
	Domain() domain.Domain
	Config() *config.Config
}

type handleRequestAnticoneFlow struct {
	RequestAnticoneContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peer.Peer
}

// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	flow := &handleRequestAnticoneFlow{
		RequestAnticoneContext: context,
		incomingRoute:          incomingRoute,
		outgoingRoute:          outgoingRoute,
		peer:                   peer,
	}
	return flow.start()
}
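
// start serves RequestAnticone requests in a loop: for each request it computes
// past(contextHash) cap anticone(blockHash), sorts the resulting headers by
// blue work as a topological proxy, and streams them back followed by a
// DoneHeaders message.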
func (flow *handleRequestAnticoneFlow) start() error {
	for {
		blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
		if err != nil {
			return err
		}
		log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
		log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)

		// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
		// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
		// we relay blocks only if they enter virtual's mergeset. We add 2 for a small error margin.
		blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
			flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
		if err != nil {
			return protocolerrors.Wrap(true, err, "Failed querying anticone")
		}
		log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)

		blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
		for i, blockHash := range blockHashes {
			blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
			if err != nil {
				return err
			}
			blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
		}

		// We sort the headers in bottom-up topological order before sending
		sort.Slice(blockHeaders, func(i, j int) bool {
			return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
		})

		blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
		err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
		if err != nil {
			return err
		}

		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
		if err != nil {
			return err
		}
	}
}
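
// receiveRequestAnticone dequeues the next message and extracts the block and
// context hashes of the anticone request.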
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
	contextHash *externalapi.DomainHash, err error) {

	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)

	return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}

@@ -1,75 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
	Domain() domain.Domain
}

type handleRequestBlockLocatorFlow struct {
	RequestBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestBlockLocatorFlow{
		RequestBlockLocatorContext: context,
		incomingRoute:              incomingRoute,
		outgoingRoute:              outgoingRoute,
	}
	return flow.start()
}
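
// start serves getBlockLocator requests in a loop, answering each one with a
// block locator spanning from the requested high hash down to the pruning
// point.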
func (flow *handleRequestBlockLocatorFlow) start() error {
	for {
		highHash, limit, err := flow.receiveGetBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)

		locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
		if err != nil || len(locator) == 0 {
			if err != nil {
				log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
			}
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between the pruning point and %s", highHash)
		}

		err = flow.sendBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, 0, err
	}
	msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)

	return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}

func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
	msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
	err := flow.outgoingRoute.Enqueue(msgBlockLocator)
	if err != nil {
		return err
	}
	return nil
}

@@ -1,116 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// This constant must be equal at both the syncer and the syncee. Therefore, never (!!) change this constant
// unless a new p2p version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99

// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
	Domain() domain.Domain
}

type handleRequestHeadersFlow struct {
	RequestHeadersContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peer.Peer
}

// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	flow := &handleRequestHeadersFlow{
		RequestHeadersContext: context,
		incomingRoute:         incomingRoute,
		outgoingRoute:         outgoingRoute,
		peer:                  peer,
	}
	return flow.start()
}
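
// start serves RequestHeaders requests in a loop. Headers between lowHash and
// highHash are streamed in chunks of at most maxBlocks, waiting for a
// RequestNextHeaders message between chunks so the route is never overflowed.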
func (flow *handleRequestHeadersFlow) start() error {
	for {
		lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
		if err != nil {
			return err
		}
		log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

		isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
		if err != nil {
			return err
		}
		if !isLowSelectedAncestorOfHigh {
			return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
				lowHash, highHash)
		}

		for !lowHash.Equal(highHash) {
			log.Debugf("Getting block headers between %s and %s for peer %s", lowHash, highHash, flow.peer)

			// GetHashesBetween is a relatively heavy operation, so we limit it
			// in order to avoid locking the consensus for too long.
			// maxBlocks MUST be >= MergeSetSizeLimit + 1
			const maxBlocks = 1 << 10
			blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
			if err != nil {
				return err
			}
			log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)

			blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
			for i, blockHash := range blockHashes {
				blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
				if err != nil {
					return err
				}
				blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
			}

			blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
			err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
			if err != nil {
				return err
			}

			message, err := flow.incomingRoute.Dequeue()
			if err != nil {
				return err
			}
			if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
			}

			// The next lowHash is the last element in blockHashes
			lowHash = blockHashes[len(blockHashes)-1]
		}

		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
		if err != nil {
			return err
		}
	}
}

func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
	highHash *externalapi.DomainHash, err error) {

	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)

	return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}

@@ -1,140 +0,0 @@
package blockrelay

import (
	"errors"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
	Domain() domain.Domain
}

type handleRequestPruningPointUTXOSetFlow struct {
	HandleRequestPruningPointUTXOSetContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
	outgoingRoute *router.Route) error {

	flow := &handleRequestPruningPointUTXOSetFlow{
		HandleRequestPruningPointUTXOSetContext: context,
		incomingRoute:                           incomingRoute,
		outgoingRoute:                           outgoingRoute,
	}

	return flow.start()
}
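
// start serves pruning point UTXO set requests in a loop.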
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
	for {
		msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
		if err != nil {
			return err
		}

		err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
	msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

	onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
	defer onEnd()

	log.Debugf("Got request for pruning point UTXO set")

	return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}

func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
	*appmessage.MsgRequestPruningPointUTXOSet, error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}
	msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
	if !ok {
		// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
		return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
	}
	return msgRequestPruningPointUTXOSet, nil
}
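
// sendPruningPointUTXOSet streams the requested pruning point UTXO set in
// `step`-sized chunks. Every `ibdBatchSize` chunks it pauses and waits for the
// peer to request the next batch, so that the outgoing route cannot overflow.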
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
	msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

	// Send the UTXO set in `step`-sized chunks
	const step = 1000
	var fromOutpoint *externalapi.DomainOutpoint
	chunksSent := 0
	for {
		pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
			msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
		if err != nil {
			if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
				return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
			}
			// Any other error must abort the flow rather than fall through with nil UTXOs
			return err
		}

		log.Debugf("Retrieved %d UTXOs for pruning block %s",
			len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)

		outpointAndUTXOEntryPairs :=
			appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
		if err != nil {
			return err
		}

		finished := len(pruningPointUTXOs) < step
		if finished && chunksSent%ibdBatchSize != 0 {
			log.Debugf("Finished sending UTXOs for pruning block %s",
				msgRequestPruningPointUTXOSet.PruningPointHash)

			return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
		}

		if len(pruningPointUTXOs) > 0 {
			fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
		}
		chunksSent++

		// Wait for the peer to request more chunks every `ibdBatchSize` chunks
		if chunksSent%ibdBatchSize == 0 {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}
			_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
			if !ok {
				// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
				return protocolerrors.Errorf(false, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
			}

			if finished {
				log.Debugf("Finished sending UTXOs for pruning block %s",
					msgRequestPruningPointUTXOSet.PruningPointHash)

				return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
			}
		}
	}
}

@@ -1,723 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
	"time"
)

// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
	Domain() domain.Domain
	Config() *config.Config
	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
	OnPruningPointUTXOSetOverride() error
	IsIBDRunning() bool
	TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
	UnsetIBDRunning()
	IsRecoverableError(err error) bool
}

type handleIBDFlow struct {
	IBDContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
}

// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
	peer *peerpkg.Peer) error {

	flow := &handleIBDFlow{
		IBDContext:    context,
		incomingRoute: incomingRoute,
		outgoingRoute: outgoingRoute,
		peer:          peer,
	}
	return flow.start()
}
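
// start blocks on the peer's IBDRequestChannel and runs a full IBD round for
// each block that other flows push into it.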
func (flow *handleIBDFlow) start() error {
	for {
		// Wait for IBD requests triggered by other flows
		block, ok := <-flow.peer.IBDRequestChannel()
		if !ok {
			return nil
		}
		err := flow.runIBDIfNotRunning(block)
		if err != nil {
			return err
		}
	}
}
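
// runIBDIfNotRunning performs a single IBD round against the peer, unless an
// IBD round is already in progress elsewhere. The round negotiates the highest
// shared chain block, syncs headers (with a pruning proof if needed) and then
// downloads the missing block bodies.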
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
	if !wasIBDNotRunning {
		log.Debugf("IBD is already running")
		return nil
	}

	isFinishedSuccessfully := false
	defer func() {
		flow.UnsetIBDRunning()
		flow.logIBDFinished(isFinishedSuccessfully)
	}()

	relayBlockHash := consensushashing.BlockHash(block)

	log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
	log.Debugf("Syncing blocks up to %s", relayBlockHash)
	log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)

	syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
	if err != nil {
		return err
	}

	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
		block, highestKnownSyncerChainHash)
	if err != nil {
		return err
	}

	if !shouldSync {
		return nil
	}

	if shouldDownloadHeadersProof {
		log.Infof("Starting IBD with headers proof")
		err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
		if err != nil {
			return err
		}
	} else {
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", relayBlockHash)
				return nil
			}
		}

		err = flow.syncPruningPointFutureHeaders(
			flow.Domain().Consensus(),
			syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
		if err != nil {
			return err
		}
	}

	// We start by syncing missing bodies over the syncer selected chain
	err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
	if err != nil {
		return err
	}
	relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	// The relay block might be in the anticone of the syncer selected tip, so
	// check its chain for missing bodies as well.
	// Note: this operation can be slightly optimized to avoid the full chain search, since the relay block
	// is in the syncer virtual mergeset, which has bounded size.
	if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
		err = flow.syncMissingBlockBodies(relayBlockHash)
		if err != nil {
			return err
		}
	}

	log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
	isFinishedSuccessfully = true
	return nil
}
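
// negotiateMissingSyncerChainSegment returns the syncer's header selected tip
// together with the highest syncer chain block that the syncee already knows
// (nil if no chain block is shared). It repeatedly zooms in on ever-smaller
// locator ranges until the exact boundary block is found.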
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
	/*
		Algorithm:
			Request full selected chain block locator from syncer
			Find the highest block which we know
			Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
	*/

	// Empty hashes indicate that the full chain is queried
	locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
	if err != nil {
		return nil, nil, err
	}
	if len(locatorHashes) == 0 {
		return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
			"to contain at least one element")
	}
	log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
		len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
	syncerHeaderSelectedTipHash := locatorHashes[0]
	var highestKnownSyncerChainHash *externalapi.DomainHash
	chainNegotiationRestartCounter := 0
	chainNegotiationZoomCounts := 0
	initialLocatorLen := len(locatorHashes)
	for {
		var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
		for _, syncerChainHash := range locatorHashes {
			info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
			if err != nil {
				return nil, nil, err
			}
			if info.Exists {
				currentHighestKnownSyncerChainHash = syncerChainHash
				break
			}
			lowestUnknownSyncerChainHash = syncerChainHash
		}
		// No unknown blocks, break. Note this can only happen in the first iteration
		if lowestUnknownSyncerChainHash == nil {
			highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
			break
		}
		// No shared block, break
		if currentHighestKnownSyncerChainHash == nil {
			highestKnownSyncerChainHash = nil
			break
		}
		// No point in zooming further
		if len(locatorHashes) == 1 {
			highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
			break
		}
		// Zoom in
		locatorHashes, err = flow.getSyncerChainBlockLocator(
			lowestUnknownSyncerChainHash,
			currentHighestKnownSyncerChainHash, time.Second*10)
		if err != nil {
			return nil, nil, err
		}
		if len(locatorHashes) > 0 {
			if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
				!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
				return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
					"hashes to match the locator bounds")
			}

			chainNegotiationZoomCounts++
			log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
				chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

			if len(locatorHashes) == 2 {
				// We found our search target
				highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
				break
			}

			if chainNegotiationZoomCounts > initialLocatorLen*2 {
				// Since the zoom-in always queries two consecutive entries in the previous locator, the
				// locator is expected to shrink at least every two iterations
				return nil, nil, protocolerrors.Errorf(true,
					"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
					chainNegotiationZoomCounts, initialLocatorLen)
			}

		} else { // Empty locator signals a restart due to chain changes
			chainNegotiationZoomCounts = 0
			chainNegotiationRestartCounter++
			if chainNegotiationRestartCounter > 32 {
				return nil, nil, protocolerrors.Errorf(false,
					"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
			}
			log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)

			// An empty locator signals that the syncer chain was modified and no longer contains one of
			// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
			locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
			if err != nil {
				return nil, nil, err
			}
			if len(locatorHashes) == 0 {
				return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
					"to contain at least one element")
			}
			log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
				chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

			initialLocatorLen = len(locatorHashes)
			// Reset syncer's header selected tip
			syncerHeaderSelectedTipHash = locatorHashes[0]
		}
	}

	log.Debugf("Found highest known syncer chain block %s from peer %s",
		highestKnownSyncerChainHash, flow.peer)

	return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}

func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
	if err != nil {
		return false, err
	}

	return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
	successString := "successfully"
	if !isFinishedSuccessfully {
		successString = "(interrupted)"
	}
	log.Infof("IBD with peer %s finished %s", flow.peer, successString)
}
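
// getSyncerChainBlockLocator requests a chain block locator for the given
// range from the syncer and validates that the response is of a sane size.
// Passing nil bounds queries the syncer's full selected chain.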
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
	highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {

	requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
	err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
	if err != nil {
		return nil, err
	}
	message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
	if err != nil {
		return nil, err
	}
	switch message := message.(type) {
	case *appmessage.MsgIBDChainBlockLocator:
		if len(message.BlockLocatorHashes) > 64 {
			return nil, protocolerrors.Errorf(true,
				"Got block locator of size %d>64 while expecting locator to have size "+
					"which is logarithmic in DAG size (which should never exceed 2^64)",
				len(message.BlockLocatorHashes))
		}
		return message.BlockLocatorHashes, nil
	default:
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
	}
}
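
// syncPruningPointFutureHeaders downloads all headers between the highest
// known syncer chain block and the syncer's header selected tip, validating
// and inserting them as they stream in. A small channel buffer keeps header
// download and header processing overlapped.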
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
	syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
	highBlockDAAScoreHint uint64) error {

	log.Infof("Downloading headers from %s", flow.peer)

	if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
		// No need to get syncer selected tip headers, so sync relay past and return
		return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
	}

	err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
	if err != nil {
		return err
	}

	highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	spawn("handleIBDFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				close(blockHeadersMessageChan)
				return
			}
			if len(blockHeadersMessage.BlockHeaders) == 0 {
				// The syncer should have sent a done message if the search completed, and not an empty list
				errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}

			lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
			progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
		case err := <-errChan:
			return err
		}
	}
}
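
// syncMissingRelayPast makes sure the triggering relay block itself is known:
// if it is not, it requests the single anticone header chunk for
// past(relayBlockHash) cap anticone(syncerHeaderSelectedTipHash) and verifies
// that the relay block arrived, banning the peer otherwise.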
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
	// Finished downloading syncer selected tip blocks,
	// check if we already have the triggering relayBlockHash
	relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		// Send a special header request for the selected tip anticone. This is expected to
		// be a small set, as it is bounded by the size of virtual's mergeset.
		err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
		if err != nil {
			return err
		}
		anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
		if err != nil {
			return err
		}
		if anticoneDone {
			return protocolerrors.Errorf(true,
				"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		_, anticoneDone, err = flow.receiveHeaders()
		if err != nil {
			return err
		}
		if !anticoneDone {
			return protocolerrors.Errorf(true,
				"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		for _, header := range anticoneHeadersMessage.BlockHeaders {
			err = flow.processHeader(consensus, header)
			if err != nil {
				return err
			}
		}
	}

	// If the relayBlockHash has still not been received, the peer is misbehaving
	relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		return protocolerrors.Errorf(true, "did not receive "+
			"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
	}
	return nil
}

func (flow *handleIBDFlow) sendRequestAnticone(
	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {

	msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
	return flow.outgoingRoute.Enqueue(msgRequestAnticone)
}

func (flow *handleIBDFlow) sendRequestHeaders(
	highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {

	msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
	return flow.outgoingRoute.Enqueue(msgRequestHeaders)
}
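
// receiveHeaders dequeues the next message and reports whether it was a
// BlockHeaders chunk or a DoneHeaders terminator.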
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}
	switch message := message.(type) {
	case *appmessage.BlockHeadersMessage:
		return message, false, nil
	case *appmessage.MsgDoneHeaders:
		return nil, true, nil
	default:
		return nil, false,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s, got: %s",
				appmessage.CmdBlockHeaders,
				appmessage.CmdDoneHeaders,
				message.Command())
	}
}
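
// processHeader validates and inserts a header-only block into the given
// consensus, tolerating duplicates and banning the peer on rule violations.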
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	block := &externalapi.DomainBlock{
		Header:       header,
		Transactions: nil,
	}

	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
		return err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
		return nil
	}
	_, err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}

	return nil
}
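
// validatePruningPointFutureHeaderTimestamps sanity-checks the staging
// consensus against the current one: the candidate selected tip must be newer
// than the current selected tip by a meaningful margin before the staging
// consensus is allowed to replace it.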
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
	headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
	if err != nil {
		return err
	}
	headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

	currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
	if err != nil {
		return err
	}
	currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

	if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
		return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
			"tip is smaller than the current selected tip")
	}

	minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
	if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
		return protocolerrors.Errorf(false, "difference between the timestamps of "+
			"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
	}
	return nil
}
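
// receiveAndInsertPruningPointUTXOSet receives the pruning point UTXO set in
// chunks and appends each chunk to the given consensus, requesting the next
// batch every ibdBatchSize chunks. It returns false (without an error) if the
// peer reports that the pruning point is no longer valid.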
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}
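
// syncMissingBlockBodies downloads the bodies of all header-only blocks up to
// highHash in batches of ibdBatchSize, validating each received block against
// the hash that was requested, and finally resolves the virtual state.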
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
	if err != nil {
		return err
	}
	highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
	highestProcessedDAAScore := lowBlockHeader.DAAScore()

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block, virtualChangeSet)
			if err != nil {
				return err
			}

			highestProcessedDAAScore = block.Header.DAAScore()
		}

		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
	}

	return flow.resolveVirtual(highestProcessedDAAScore)
}

func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(true, "sent header of block %s where a block with body was expected",
			consensushashing.BlockHash(block))
	}

	return nil
}
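
// resolveVirtual iteratively resolves the virtual state after the block
// download, logging an estimated progress percentage derived from the DAA
// score distance that was covered.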
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
	virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
	if err != nil {
		return err
	}

	for i := 0; ; i++ {
		if i%10 == 0 {
			virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
			if err != nil {
				return err
			}
			var percents int
			// Note: the scores are uint64, so compare instead of subtracting in order to avoid underflow
			if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
				percents = 100
			} else {
				percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
			}
			log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
		}
		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
		if err != nil {
			return err
		}

		err = flow.OnVirtualChange(virtualChangeSet)
		if err != nil {
			return err
		}

		if isCompletelyResolved {
			log.Infof("Resolved virtual")
			return nil
		}
	}
}

@@ -1,45 +0,0 @@
package blockrelay

type ibdProgressReporter struct {
	lowDAAScore                 uint64
	highDAAScore                uint64
	objectName                  string
	totalDAAScoreDifference     uint64
	lastReportedProgressPercent int
	processed                   int
}
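
// newIBDProgressReporter creates a reporter for the DAA score range
// [lowDAAScore, highDAAScore]. The high score is clamped to be strictly above
// the low score so that the percentage denominator is never zero.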
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
	if highDAAScore <= lowDAAScore {
		// Avoid a zero or negative diff
		highDAAScore = lowDAAScore + 1
	}
	return &ibdProgressReporter{
		lowDAAScore:                 lowDAAScore,
		highDAAScore:                highDAAScore,
		objectName:                  objectName,
		totalDAAScoreDifference:     highDAAScore - lowDAAScore,
		lastReportedProgressPercent: 0,
		processed:                   0,
	}
}
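
// reportProgress accumulates the number of processed objects and logs a new
// progress line whenever the estimated completion percentage increases.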
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
	ipr.processed += processedDelta

	// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
	if highestProcessedDAAScore > ipr.highDAAScore {
		ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
		ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
	}
	relativeDAAScore := uint64(0)
	if highestProcessedDAAScore > ipr.lowDAAScore {
		// Avoid a negative diff
		relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
	}
	progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
	if progressPercent > ipr.lastReportedProgressPercent {
		log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
		ipr.lastReportedProgressPercent = progressPercent
	}
}

@@ -1,428 +0,0 @@
package blockrelay

import (
	"fmt"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/pkg/errors"
	"time"
)
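
// ibdWithHeadersProof performs IBD against a staging consensus: it downloads
// and validates the pruning point proof, headers and the pruning point UTXO
// set, and commits the staging consensus only if the whole stage succeeds.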
func (flow *handleIBDFlow) ibdWithHeadersProof(
	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
	err := flow.Domain().InitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
	if err != nil {
		if !flow.IsRecoverableError(err) {
			return err
		}

		log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
		deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
		if deleteStagingConsensusErr != nil {
			return deleteStagingConsensusErr
		}

		return err
	}

	log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
		"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
	err = flow.Domain().CommitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.OnPruningPointUTXOSetOverride()
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
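The function above follows a transaction-like, two-phase pattern: all headers-proof work happens against a separate staging consensus, which is committed only on success and rolled back on a recoverable failure, so the active consensus is never left half-synced. A minimal sketch of the same pattern in isolation; the stager interface and runStaged helper are illustrative, not kaspad API, and this simplified version rolls back on any failure:

	// A hypothetical two-phase "staging" workflow, mirroring the flow above.
	type stager interface {
		Init() error
		Commit() error
		Delete() error
	}

	func runStaged(s stager, work func() error) error {
		if err := s.Init(); err != nil {
			return err
		}
		if err := work(); err != nil {
			// On failure, roll back the staged state before surfacing the error.
			if deleteErr := s.Delete(); deleteErr != nil {
				return deleteErr
			}
			return err
		}
		// Only a successful run makes the staged state the active one.
		return s.Commit()
	}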
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
	relayBlock *externalapi.DomainBlock,
	highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {

	var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
	if highestKnownSyncerChainHash != nil {
		highestSharedBlockFound = true
		pruningPoint, err := flow.Domain().Consensus().PruningPoint()
		if err != nil {
			return false, false, err
		}

		isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
			pruningPoint, highestKnownSyncerChainHash)
		if err != nil {
			return false, false, err
		}
	}
	// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
	// we might have info here which is relevant to finality conflict decisions. This should be taken into
	// account when we improve this aspect.
	if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
		hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
		if err != nil {
			return false, false, err
		}

		if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
			return true, true, nil
		}

		return false, false, nil
	}

	return false, true, nil
}

func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
	headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return false, err
	}

	headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
	if err != nil {
		return false, err
	}

	if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
		return false, nil
	}

	return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}

func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
	log.Infof("Downloading the pruning point proof from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
	if err != nil {
		return nil, err
	}
	message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
	if err != nil {
		return nil, err
	}
	pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
	if !ok {
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
	}
	pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
	err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
	if err != nil {
		if errors.As(err, &ruleerrors.RuleError{}) {
			return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
		}
		return nil, err
	}

	err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
	if err != nil {
		return nil, err
	}

	return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}

func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
	highBlockDAAScore uint64) error {

	proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
	if err != nil {
		return err
	}

	err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
	if err != nil {
		return err
	}

	// TODO: Remove this condition once there's a more proper way to check finality violation
	// in the headers proof.
	if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
		return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
	}

	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
		syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
	if err != nil {
		return err
	}

	log.Infof("Headers downloaded from peer %s", flow.peer)

	relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}

	if !relayBlockInfo.Exists {
		return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
	}

	err = flow.validatePruningPointFutureHeaderTimestamps()
	if err != nil {
		return err
	}

	log.Debugf("Syncing the current pruning point UTXO set")
	syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
	if err != nil {
		return err
	}
	if !syncedPruningPointUTXOSetSuccessfully {
		log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
		return nil
	}
	log.Debugf("Finished syncing the current pruning point UTXO set")
	return nil
}

func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
	log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
	if err != nil {
		return err
	}

	err = flow.validateAndInsertPruningPoints(proofPruningPoint)
	if err != nil {
		return err
	}

	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}

	msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
	if !ok {
		return protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
	}

	pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
	if err != nil {
		return err
	}

	if done {
		return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
	}

	if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
	}

	err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
	if err != nil {
		return err
	}

	i := 0
	for ; ; i++ {
		blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
		if err != nil {
			return err
		}

		if done {
			break
		}

		err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
		if err != nil {
			return err
		}

		// We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
		// the pruning point outside the loop, so we use i+2 instead of i+1.
		if (i+2)%ibdBatchSize == 0 {
			log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
			err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
			if err != nil {
				return err
			}
		}
	}

	log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
	return nil
}
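The (i+2)%ibdBatchSize check above is easy to misread: the pruning point itself is consumed before the loop, so once loop iteration i (zero-based) has been handled, i+2 blocks have been received in total, and a fresh batch must be requested exactly when that total fills a whole batch. A tiny standalone check of the arithmetic, using a made-up batch size of 4 purely for the demonstration (the real constant is defined elsewhere in the package):

	package main

	import "fmt"

	const ibdBatchSize = 4 // illustrative value only

	func main() {
		// The pruning point is received before the loop, so after loop
		// iteration i a total of i+2 blocks have been received.
		for i := 0; i < 10; i++ {
			if (i+2)%ibdBatchSize == 0 {
				fmt.Printf("after loop block %d (total %d blocks) -> request next batch\n", i, i+2)
			}
		}
		// Prints for i=2 (total 4) and i=6 (total 8): each time a batch fills up.
	}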
func (flow *handleIBDFlow) processBlockWithTrustedData(
	consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {

	blockWithTrustedData := &externalapi.BlockWithTrustedData{
		Block:        appmessage.MsgBlockToDomainBlock(block.Block),
		DAAWindow:    make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
		GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
	}

	for _, index := range block.DAAWindowIndices {
		blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
	}

	for _, index := range block.GHOSTDAGDataIndices {
		blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
	}

	_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
	return err
}
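Note the compression scheme being decoded here: MsgTrustedData carries each DAA-window and GHOSTDAG entry once for the whole download, and every MsgBlockWithTrustedDataV4 references the shared entries by index, which pays off because neighboring blocks' windows overlap heavily. A toy sketch of that decode step, with plain strings standing in for the real entry types:

	package main

	import "fmt"

	func main() {
		// Shared pool of entries, sent once for the whole anticone download.
		sharedWindow := []string{"w0", "w1", "w2", "w3"}

		// Each block only carries indices into the shared pool.
		blockIndices := [][]int{
			{0, 1, 2}, // block A's DAA window
			{1, 2, 3}, // block B's window overlaps A's, so only indices repeat
		}

		for _, indices := range blockIndices {
			window := make([]string, 0, len(indices))
			for _, index := range indices {
				window = append(window, sharedWindow[index])
			}
			fmt.Println(window)
		}
	}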
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}

	switch downCastedMessage := message.(type) {
	case *appmessage.MsgBlockWithTrustedDataV4:
		return downCastedMessage, false, nil
	case *appmessage.MsgDoneBlocksWithTrustedData:
		return nil, true, nil
	default:
		return nil, false,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s, got: %s",
				(&appmessage.MsgBlockWithTrustedData{}).Command(),
				(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
				downCastedMessage.Command())
	}
}

func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}

	msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
	if !ok {
		return nil,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
	}

	return msgPruningPoints, nil
}

func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
	currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
	if err != nil {
		return err
	}

	if currentPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
	}

	pruningPoints, err := flow.receivePruningPoints()
	if err != nil {
		return err
	}

	headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
	for i, header := range pruningPoints.Headers {
		headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
	}

	arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
	if err != nil {
		return err
	}

	if arePruningPointsViolatingFinality {
		// TODO: Find a better way to deal with finality conflicts.
		return protocolerrors.Errorf(false, "pruning points are violating finality")
	}

	lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
	if !lastPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
			"point in the list")
	}

	err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
	if err != nil {
		return err
	}

	return nil
}

func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
	pruningPoint *externalapi.DomainHash) (bool, error) {

	log.Infof("Checking if the suggested pruning point %s is compatible with the node's DAG", pruningPoint)
	isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
	if err != nil {
		return false, err
	}

	if !isValid {
		return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
	}

	log.Info("Fetching the pruning point UTXO set")
	isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
	if err != nil {
		log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
		return false, err
	}

	if !isSuccessful {
		log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
		return false, nil
	}

	log.Info("Fetched the new pruning point UTXO set")
	return true, nil
}

func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
	defer func() {
		err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
		if err != nil {
			panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
		}
	}()

	err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
	if err != nil {
		return false, err
	}

	receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
	if err != nil {
		return false, err
	}
	if !receivedAll {
		return false, nil
	}

	err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
	if err != nil {
		// TODO: Find a better way to deal with finality conflicts.
		if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
			return false, nil
		}
		return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
	}

	return true, nil
}
@@ -1,9 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,35 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
	Domain() domain.Domain
	Config() *config.Config
}

// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
	if err != nil {
		return err
	}

	if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
		log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
		return nil
	}

	log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)

	virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
	return outgoingRoute.Enqueue(virtualSelectedParentInv)
}
@@ -1,42 +0,0 @@
package ping

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
type ReceivePingsContext interface {
}

type receivePingsFlow struct {
	ReceivePingsContext
	incomingRoute, outgoingRoute *router.Route
}

// ReceivePings handles all ping messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgPing.
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &receivePingsFlow{
		ReceivePingsContext: context,
		incomingRoute:       incomingRoute,
		outgoingRoute:       outgoingRoute,
	}
	return flow.start()
}

func (flow *receivePingsFlow) start() error {
	for {
		message, err := flow.incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		pingMessage := message.(*appmessage.MsgPing)

		pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
		err = flow.outgoingRoute.Enqueue(pongMessage)
		if err != nil {
			return err
		}
	}
}
@@ -1,77 +0,0 @@
package ping

import (
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/pkg/errors"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util/random"
)

// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
	ShutdownChan() <-chan struct{}
}

type sendPingsFlow struct {
	SendPingsContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
}

// SendPings starts sending MsgPings every pingInterval to the given peer.
// This function assumes that incomingRoute will only return MsgPong.
func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error {
	flow := &sendPingsFlow{
		SendPingsContext: context,
		incomingRoute:    incomingRoute,
		outgoingRoute:    outgoingRoute,
		peer:             peer,
	}
	return flow.start()
}

func (flow *sendPingsFlow) start() error {
	const pingInterval = 2 * time.Minute
	ticker := time.NewTicker(pingInterval)
	defer ticker.Stop()

	for {
		select {
		case <-flow.ShutdownChan():
			return nil
		case <-ticker.C:
		}

		nonce, err := random.Uint64()
		if err != nil {
			return err
		}
		flow.peer.SetPingPending(nonce)

		pingMessage := appmessage.NewMsgPing(nonce)
		err = flow.outgoingRoute.Enqueue(pingMessage)
		if err != nil {
			return err
		}

		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			if errors.Is(err, router.ErrTimeout) {
				return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
			}
			return err
		}
		pongMessage := message.(*appmessage.MsgPong)
		if pongMessage.Nonce != pingMessage.Nonce {
			return protocolerrors.New(true, "nonce mismatch between ping and pong")
		}
		flow.peer.SetPingIdle()
	}
}
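Together the two flows form a symmetric keep-alive: each side runs SendPings against its peer while ReceivePings answers the peer's pings, and the nonce ties every pong to the ping that prompted it. A minimal sketch of exercising the receive side over in-process routes, assuming the route and message constructors behave as they do in the tests elsewhere in this diff (the route names are arbitrary):

	incomingRoute := router.NewRoute("ping-in")  // carries the peer's pings
	outgoingRoute := router.NewRoute("pong-out") // carries our pongs back
	go func() {
		_ = ping.ReceivePings(nil, incomingRoute, outgoingRoute) // the context is unused by this flow
	}()

	_ = incomingRoute.Enqueue(appmessage.NewMsgPing(42))
	reply, _ := outgoingRoute.DequeueWithTimeout(time.Second)
	fmt.Println(reply.(*appmessage.MsgPong).Nonce) // prints 42: the pong echoes the ping's nonce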
@@ -1,209 +0,0 @@
package v5

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type protocolManager interface {
	RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
		isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
		messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	Context() *flowcontext.FlowContext
}

// Register registers all of the protocol flows on the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
	flows = registerAddressFlows(m, router, isStopping, errChan)
	flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
	flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)

	return flows
}

func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
			isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
			}),

		m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
			appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
			appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
			appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
			appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
			appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
			appmessage.CmdPruningPointProof,
			appmessage.CmdTrustedData,
			appmessage.CmdIBDChainBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBD(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestHeaders", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
				appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestAnticone", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandlePruningPointProofRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
			[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
		m.RegisterFlow("HandleRequestTransactions", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}

func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("HandleRejects", router,
			[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}
@@ -1,37 +0,0 @@
package rejects

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleRejectsContext is the interface for the context needed for the HandleRejects flow.
type HandleRejectsContext interface {
}

type handleRejectsFlow struct {
	HandleRejectsContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRejects handles all reject messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgReject.
func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &handleRejectsFlow{
		HandleRejectsContext: context,
		incomingRoute:        incomingRoute,
		outgoingRoute:        outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRejectsFlow) start() error {
	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return err
	}
	rejectMessage := message.(*appmessage.MsgReject)

	return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
}
@@ -1,24 +0,0 @@
package testing

import (
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/pkg/errors"
)

func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
	pErr := protocolerrors.ProtocolError{}
	if errors.As(err, &pErr) != isProtocolError {
		t.Fatalf("Unexpected error %+v", err)
	}

	if pErr.ShouldBan != shouldBan {
		t.Fatalf("Expected shouldBan %t but got %t", shouldBan, pErr.ShouldBan)
	}

	if !strings.Contains(err.Error(), contains) {
		t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err)
	}
}
@@ -1,51 +0,0 @@
package testing

import (
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type fakeReceiveAddressesContext struct{}

func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager {
	return nil
}

func TestReceiveAddressesErrors(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		incomingRoute := router.NewRoute("incoming")
		outgoingRoute := router.NewRoute("outgoing")
		peer := peerpkg.New(nil)
		errChan := make(chan error)
		go func() {
			errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer)
		}()

		_, err := outgoingRoute.DequeueWithTimeout(time.Second)
		if err != nil {
			t.Fatalf("DequeueWithTimeout: %+v", err)
		}

		// Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban
		err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress,
			addressmanager.GetAddressesMax+1)))
		if err != nil {
			t.Fatalf("Enqueue: %+v", err)
		}

		select {
		case err := <-errChan:
			checkFlowError(t, err, true, true, "address count exceeded")
		case <-time.After(time.Second):
			t.Fatalf("timed out after %s", time.Second)
		}
	})
}
@@ -1,4 +0,0 @@
package testing

// Because of a bug in Go, coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333
// So this is a dummy non-test go file in the package.
@@ -1,209 +0,0 @@
package transactionrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// TransactionsRelayContext is the interface for the context needed for the
// HandleRelayedTransactions and HandleRequestedTransactions flows.
type TransactionsRelayContext interface {
	NetAdapter() *netadapter.NetAdapter
	Domain() domain.Domain
	SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
	OnTransactionAddedToMempool()
	EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
	IsIBDRunning() bool
}

type handleRelayedTransactionsFlow struct {
	TransactionsRelayContext
	incomingRoute, outgoingRoute *router.Route
	invsQueue                    []*appmessage.MsgInvTransaction
}

// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they
// are missing, adds them to the mempool and propagates them to the rest of the network.
func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &handleRelayedTransactionsFlow{
		TransactionsRelayContext: context,
		incomingRoute:            incomingRoute,
		outgoingRoute:            outgoingRoute,
		invsQueue:                make([]*appmessage.MsgInvTransaction, 0),
	}
	return flow.start()
}

func (flow *handleRelayedTransactionsFlow) start() error {
	for {
		inv, err := flow.readInv()
		if err != nil {
			return err
		}

		if flow.IsIBDRunning() {
			continue
		}

		requestedIDs, err := flow.requestInvTransactions(inv)
		if err != nil {
			return err
		}

		err = flow.receiveTransactions(requestedIDs)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
	inv *appmessage.MsgInvTransaction) (requestedIDs []*externalapi.DomainTransactionID, err error) {

	idsToRequest := make([]*externalapi.DomainTransactionID, 0, len(inv.TxIDs))
	for _, txID := range inv.TxIDs {
		if flow.isKnownTransaction(txID) {
			continue
		}
		exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
		if exists {
			continue
		}
		idsToRequest = append(idsToRequest, txID)
	}

	if len(idsToRequest) == 0 {
		return idsToRequest, nil
	}

	msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
	err = flow.outgoingRoute.Enqueue(msgGetTransactions)
	if err != nil {
		flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
		return nil, err
	}
	return idsToRequest, nil
}

func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
	// Ask the transaction memory pool if the transaction is known
	// to it in any form (main pool or orphan).
	if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
		return true
	}

	return false
}

func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) {
	if len(flow.invsQueue) > 0 {
		var inv *appmessage.MsgInvTransaction
		inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
		return inv, nil
	}

	msg, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}

	inv, ok := msg.(*appmessage.MsgInvTransaction)
	if !ok {
		return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay flow while "+
			"expecting an inv message", msg.Command())
	}
	return inv, nil
}

func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error {
	return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs)
}

// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute,
// returning only one of the message types at a time, and populates invsQueue with any
// inv messages that arrive in the meantime.
func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
	msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) {

	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvTransaction:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgTx:
			return message, nil, nil
		case *appmessage.MsgTransactionNotFound:
			return nil, message, nil
		default:
			return nil, nil, errors.Errorf("unexpected message %s", message.Command())
		}
	}
}

func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
	// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
	// clean from any pending transactions.
	defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
	for _, expectedID := range requestedTransactions {
		msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
		if err != nil {
			return err
		}
		if msgTxNotFound != nil {
			if !msgTxNotFound.ID.Equal(expectedID) {
				return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
					expectedID, msgTxNotFound.ID)
			}

			continue
		}
		tx := appmessage.MsgTxToDomainTransaction(msgTx)
		txID := consensushashing.TransactionID(tx)
		if !txID.Equal(expectedID) {
			return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
				expectedID, txID)
		}

		acceptedTransactions, err :=
			flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true)
		if err != nil {
			ruleErr := &mempool.RuleError{}
			if !errors.As(err, ruleErr) {
				return errors.Wrapf(err, "failed to process transaction %s", txID)
			}

			shouldBan := false
			if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
				if txRuleErr.RejectCode == mempool.RejectInvalid {
					shouldBan = true
				}
			}

			if !shouldBan {
				continue
			}

			return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
		}
		err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions))
		if err != nil {
			return err
		}
		flow.OnTransactionAddedToMempool()
	}
	return nil
}
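A detail worth noticing above is the invsQueue buffer: while the flow is blocked waiting for the transactions of one inv, the peer may legitimately send further inv messages, and readMsgTxOrNotFound stashes them so that readInv can serve them on the next loop iteration instead of treating them as protocol errors. The queueing itself is just a slice used FIFO, as in this standalone sketch:

	package main

	import "fmt"

	func main() {
		var queue []string

		// Stash items that arrive while we are busy with something else...
		queue = append(queue, "inv-1", "inv-2")

		// ...and drain them in arrival order on later iterations.
		for len(queue) > 0 {
			var next string
			next, queue = queue[0], queue[1:]
			fmt.Println(next) // inv-1, then inv-2
		}
	}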
@@ -1,196 +0,0 @@
package transactionrelay_test

import (
	"errors"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type mocTransactionsRelayContext struct {
	netAdapter                  *netadapter.NetAdapter
	domain                      domain.Domain
	sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
}

func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
	return m.netAdapter
}

func (m *mocTransactionsRelayContext) Domain() domain.Domain {
	return m.domain
}

func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
	return m.sharedRequestedTransactions
}

func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
	return nil
}

func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}

func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
	return false
}

// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {

		var log = logger.RegisterSubSystem("PROT")
		var spawn = panics.GoroutineWrapperFunc(log)
		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		defer incomingRoute.Close()
		peerIncomingRoute := router.NewRoute("outgoing")
		defer peerIncomingRoute.Close()

		txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
		txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
		invMessage := appmessage.NewMsgInvTransaction(txIDs)
		err = incomingRoute.Enqueue(invMessage)
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		// The goroutine is representing the peer's actions.
		spawn("peerResponseToTheTransactionsRequest", func() {
			msg, err := peerIncomingRoute.Dequeue()
			if err != nil {
				t.Fatalf("Dequeue: %v", err)
			}
			inv := msg.(*appmessage.MsgRequestTransactions)

			if len(txIDs) != len(inv.IDs) {
				t.Fatalf("TestHandleRelayedTransactions: expected %d transaction IDs, but got %d", len(txIDs), len(inv.IDs))
			}

			for i, id := range inv.IDs {
				if txIDs[i].String() != id.String() {
					t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
				}
				err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i]))
				if err != nil {
					t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
				}
			}
			// Insert an unexpected message type to stop the infinite loop.
			err = incomingRoute.Enqueue(&appmessage.MsgAddresses{})
			if err != nil {
				t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
			}
		})

		err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute)
		// Since we inserted an unexpected message type to stop the infinite loop,
		// we expect the error to stem from this specific message and to count as a
		// protocol error.
		if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) {
			t.Fatalf("Expected a protocol error")
		} else {
			if !protocolErr.ShouldBan {
				t.Fatalf("Expected shouldBan true, but got false.")
			}
			if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") {
				t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+
					"in the block relay flow, but got: %v", protocolErr.Cause)
			}
		}
	})
}

// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when
// trying to dequeue a message from a closed route.
func TestOnClosedIncomingRoute(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {

		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		outgoingRoute := router.NewRoute("outgoing")
		defer outgoingRoute.Close()

		txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txIDs := []*externalapi.DomainTransactionID{txID}

		err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs})
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		incomingRoute.Close()
		err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute)
		if err == nil || !errors.Is(err, router.ErrRouteClosed) {
			t.Fatalf("Unexpected error: expected: %v, got: %v", router.ErrRouteClosed, err)
		}
	})
}
@@ -1,59 +0,0 @@
package transactionrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type handleRequestedTransactionsFlow struct {
	TransactionsRelayContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested
// transactions if those are in the mempool. Missing transactions are ignored.
func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &handleRequestedTransactionsFlow{
		TransactionsRelayContext: context,
		incomingRoute:            incomingRoute,
		outgoingRoute:            outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRequestedTransactionsFlow) start() error {
	for {
		msgRequestTransactions, err := flow.readRequestTransactions()
		if err != nil {
			return err
		}

		for _, transactionID := range msgRequestTransactions.IDs {
			tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)

			if !ok {
				msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
				err := flow.outgoingRoute.Enqueue(msgTransactionNotFound)
				if err != nil {
					return err
				}
				continue
			}

			err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
			if err != nil {
				return err
			}
		}
	}
}

func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) {
	msg, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}

	return msg.(*appmessage.MsgRequestTransactions), nil
}
@@ -1,91 +0,0 @@
package transactionrelay_test

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
	"testing"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util/panics"
	"github.com/pkg/errors"
)

// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions
// when the requested transactions aren't found in the mempool.
func TestHandleRequestedTransactionsNotFound(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		var log = logger.RegisterSubSystem("PROT")
		var spawn = panics.GoroutineWrapperFunc(log)
		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		outgoingRoute := router.NewRoute("outgoing")
		defer outgoingRoute.Close()

		txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
		txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
		msg := appmessage.NewMsgRequestTransactions(txIDs)
		err = incomingRoute.Enqueue(msg)
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		// The goroutine is representing the peer's actions.
		spawn("peerResponseToTheTransactionsMessages", func() {
			for i := range txIDs {
				msg, err := outgoingRoute.Dequeue()
				if err != nil {
					t.Fatalf("Dequeue: %s", err)
				}
				outMsg := msg.(*appmessage.MsgTransactionNotFound)
				if txIDs[i].String() != outMsg.ID.String() {
					t.Fatalf("TestHandleRequestedTransactionsNotFound: expected equal txID: expected %s, but got %s", txIDs[i].String(), outMsg.ID.String())
				}
			}
			// Close the incomingRoute to stop the infinite loop.
			incomingRoute.Close()
		})

		err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute)
		// Make sure the error is due to the closed route.
		if err == nil || !errors.Is(err, router.ErrRouteClosed) {
			t.Fatalf("Unexpected error: expected: %v, got: %v", router.ErrRouteClosed, err)
		}
	})
}
@@ -4,7 +4,6 @@ import (
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flows/ready"
	v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
	v5 "github.com/kaspanet/kaspad/app/protocol/flows/v5"
	"sync"
	"sync/atomic"

@@ -79,8 +78,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
	switch peer.ProtocolVersion() {
	case 4:
		flows = v4.Register(m, router, errChan, &isStopping)
	case 5:
		flows = v5.Register(m, router, errChan, &isStopping)
	default:
		panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
	}
@@ -7,7 +7,6 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/version"
)

// HandleGetBlockTemplate handles the respectively named RPC command
@@ -26,7 +25,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
		return nil, err
	}

	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version())}
	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}

	templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
	if err != nil {
@@ -1,31 +1,3 @@
Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)

Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Clean up log output mistakes and try to be clearer to the user (#1976, #1978)
* Apply the IBD-avoidance logic from patch10 to p2p v4 IBD handling (#1979)

Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)
@@ -343,25 +343,6 @@ func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash,
	return s.syncManager.GetHashesBetween(stagingArea, lowHash, highHash, maxBlocks)
}

func (s *consensus) GetAnticone(blockHash, contextHash *externalapi.DomainHash,
	maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	stagingArea := model.NewStagingArea()

	err = s.validateBlockHashExists(stagingArea, blockHash)
	if err != nil {
		return nil, err
	}
	err = s.validateBlockHashExists(stagingArea, contextHash)
	if err != nil {
		return nil, err
	}

	return s.syncManager.GetAnticone(stagingArea, blockHash, contextHash, maxBlocks)
}

func (s *consensus) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
@@ -720,7 +701,7 @@ func (s *consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.
		return nil, err
	}

	return s.dagTraversalManager.AnticoneFromBlocks(stagingArea, tips, blockHash, 0)
	return s.dagTraversalManager.AnticoneFromBlocks(stagingArea, tips, blockHash)
}

func (s *consensus) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
@@ -5,7 +5,3 @@ import "github.com/pkg/errors"
// ErrBlockNotInSelectedParentChain is returned from CreateHeadersSelectedChainBlockLocator if one of the parameters
// passed to it is not in the headers selected parent chain
var ErrBlockNotInSelectedParentChain = errors.New("Block is not in selected parent chain")

// ErrReachedMaxTraversalAllowed is returned from AnticoneFromBlocks if `maxTraversalAllowed` was specified
// and the traversal passed it
var ErrReachedMaxTraversalAllowed = errors.New("Traversal searching for anticone passed the maxTraversalAllowed limit")
@@ -20,7 +20,6 @@ type Consensus interface {
	GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)

	GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)
	GetAnticone(blockHash, contextHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, err error)
	GetMissingBlockBodyHashes(highHash *DomainHash) ([]*DomainHash, error)
	GetPruningPointUTXOs(expectedPruningPointHash *DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
	GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)

@@ -10,7 +10,7 @@ type DAGTraversalManager interface {
	// from lowHash (exclusive) to highHash (inclusive) over highHash's selected parent chain
	SelectedChildIterator(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash, includeLowHash bool) (BlockIterator, error)
	SelectedChild(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
	AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash, maxTraversalAllowed uint64) ([]*externalapi.DomainHash, error)
	AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
	AnticoneFromVirtualPOV(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
	BlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.DomainHash, error)
	DAABlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
@@ -6,7 +6,6 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type SyncManager interface {
	GetHashesBetween(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, maxBlocks uint64) (
		hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error)
	GetAnticone(stagingArea *StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error)
	GetMissingBlockBodyHashes(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
	CreateBlockLocator(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, limit uint32) (
		externalapi.BlockLocator, error)

@@ -49,7 +49,6 @@ type TestConsensus interface {
		*externalapi.VirtualChangeSet, error)

	MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
	ToJSON(w io.Writer) error

	RenderDAGToDot(filename string) error
@@ -336,12 +336,12 @@ func (csm *consensusStateManager) boundedMergeBreakingParents(stagingArea *model
	log.Debugf("Checking whether parent %s breaks the bounded merge set", parent)
	isBadRedInPast := false
	for _, badRedBlock := range badReds {
		isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, badRedBlock, parent)
		isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, parent, badRedBlock)
		if err != nil {
			return nil, err
		}
		if isBadRedInPast {
			log.Debugf("Parent %s is a descendant of bad red %s", parent, badRedBlock)
			log.Debugf("Parent %s is an ancestor of bad red %s", parent, badRedBlock)
			break
		}
	}
@@ -4,7 +4,6 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
	"github.com/pkg/errors"
)

func (dtm *dagTraversalManager) AnticoneFromVirtualPOV(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (
@@ -15,18 +14,16 @@ func (dtm *dagTraversalManager) AnticoneFromVirtualPOV(stagingArea *model.Stagin
		return nil, err
	}

	return dtm.AnticoneFromBlocks(stagingArea, virtualParents, blockHash, 0)
	return dtm.AnticoneFromBlocks(stagingArea, virtualParents, blockHash)
}

func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingArea, tips []*externalapi.DomainHash,
	blockHash *externalapi.DomainHash, maxTraversalAllowed uint64) (
func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (
	[]*externalapi.DomainHash, error) {

	anticone := []*externalapi.DomainHash{}
	queue := tips
	visited := hashset.New()

	traversalCounter := uint64(0)
	for len(queue) > 0 {
		var current *externalapi.DomainHash
		current, queue = queue[0], queue[1:]
@@ -51,14 +48,6 @@ func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingAre
			return nil, err
		}

		// We count the number of blocks in past(tips) \setminus past(blockHash).
		// We don't use `len(visited)` since it includes some maximal blocks in past(blockHash) as well.
		traversalCounter++
		if maxTraversalAllowed > 0 && traversalCounter > maxTraversalAllowed {
			return nil, errors.Wrapf(model.ErrReachedMaxTraversalAllowed,
				"Passed max allowed traversal (%d > %d)", traversalCounter, maxTraversalAllowed)
		}

		if !blockIsAncestorOfCurrent {
			anticone = append(anticone, current)
		}
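The hunk above drops the maxTraversalAllowed cap from AnticoneFromBlocks. To make the traversal itself easier to follow, here is a minimal, self-contained Go sketch of the same bounded walk over a toy DAG; a plain parent map and a recursive ancestor check stand in for kaspad's dagTopologyManager, and all names here are illustrative, not kaspad APIs:

package main

import (
	"errors"
	"fmt"
)

// isAncestor reports whether a is a strict ancestor of b by walking up b's parents.
func isAncestor(parents map[string][]string, a, b string) bool {
	for _, p := range parents[b] {
		if p == a || isAncestor(parents, a, p) {
			return true
		}
	}
	return false
}

// anticoneFromTips mirrors the shape of AnticoneFromBlocks: walk down from the tips,
// stop at blocks in past(blockHash), and collect everything that is in neither the
// past nor the future of blockHash. maxTraversal == 0 disables the cap.
func anticoneFromTips(parents map[string][]string, tips []string, blockHash string, maxTraversal uint64) ([]string, error) {
	anticone := []string{}
	queue := append([]string{}, tips...)
	visited := map[string]bool{}
	traversalCounter := uint64(0)

	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		if visited[current] {
			continue
		}
		visited[current] = true

		// Blocks in past(blockHash), and blockHash itself, end the walk on this branch.
		if current == blockHash || isAncestor(parents, current, blockHash) {
			continue
		}

		// Count only blocks in past(tips) \ past(blockHash), as the removed comment explains.
		traversalCounter++
		if maxTraversal > 0 && traversalCounter > maxTraversal {
			return nil, errors.New("passed max allowed traversal")
		}

		if !isAncestor(parents, blockHash, current) {
			anticone = append(anticone, current)
		}
		queue = append(queue, parents[current]...)
	}
	return anticone, nil
}

func main() {
	// G <- A, G <- B, {A,B} <- T: the anticone of A in this diamond is {B}.
	parents := map[string][]string{"T": {"A", "B"}, "A": {"G"}, "B": {"G"}, "G": {}}
	fmt.Println(anticoneFromTips(parents, []string{"T"}, "A", 0)) // [B] <nil>
}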
@@ -995,13 +995,7 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
		return nil, err
	}

	// Note: from the POV of the current block, the pruning point is the first block in its chain that is at depth
	// pm.pruningDepth and whose finality score is greater than the previous pruning point's. This is why, when the
	// diff between (finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * finalityInterval and the
	// current block's blue score is less than pm.pruningDepth, we know for sure that this block didn't trigger a
	// pruning point change.
	minRequiredBlueScoreForNextPruningPoint := (pm.finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * pm.finalityInterval

	if hasPruningPointInItsSelectedChain &&
		minRequiredBlueScoreForNextPruningPoint+pm.pruningDepth <= ghostdagData.BlueScore() {
	if hasPruningPointInItsSelectedChain && pm.finalityScore(ghostdagData.BlueScore()) > pm.finalityScore(selectedParentPruningPointHeader.BlueScore()+pm.pruningDepth) {
		var suggestedLowHash *externalapi.DomainHash
		hasReachabilityData, err := pm.reachabilityDataStore.HasReachabilityData(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
		if err != nil {
@@ -1009,15 +1003,7 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
		}

		if hasReachabilityData {
			// nextPruningPointAndCandidateByBlockHash needs suggestedLowHash to be in the future of the pruning point,
			// because otherwise reachability selected chain data is unreliable.
			isInFutureOfCurrentPruningPoint, err := pm.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, selectedParentHeader.PruningPoint())
			if err != nil {
				return nil, err
			}
			if isInFutureOfCurrentPruningPoint {
				suggestedLowHash = selectedParentHeader.PruningPoint()
			}
			suggestedLowHash = selectedParentHeader.PruningPoint()
		}

		nextOrCurrentPruningPoint, _, err = pm.nextPruningPointAndCandidateByBlockHash(stagingArea, blockHash, suggestedLowHash)
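The replaced condition above swaps blue-score arithmetic for a direct comparison of finality scores. A small worked sketch, assuming kaspad's definition finalityScore(blueScore) = blueScore / finalityInterval, with made-up values for the interval and depth, shows how each variant is evaluated:

package main

import "fmt"

const (
	finalityInterval = 100 // made-up value for illustration
	pruningDepth     = 185 // made-up value for illustration
)

// finalityScore follows kaspad's definition: the index of the finality
// window that a given blue score falls into.
func finalityScore(blueScore uint64) uint64 {
	return blueScore / finalityInterval
}

func main() {
	selectedParentPruningPointBlueScore := uint64(250)
	blockBlueScore := uint64(999)

	// First variant: the block's blue score must reach the start of the
	// next finality window plus the pruning depth.
	minRequired := (finalityScore(selectedParentPruningPointBlueScore) + 1) * finalityInterval
	firstVariant := minRequired+pruningDepth <= blockBlueScore

	// Second variant: compare finality windows directly.
	secondVariant := finalityScore(blockBlueScore) > finalityScore(selectedParentPruningPointBlueScore+pruningDepth)

	fmt.Println(firstVariant, secondVariant) // true true for these numbers
}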
@@ -145,7 +145,7 @@ func (sm *syncManager) missingBlockBodyHashes(stagingArea *model.StagingArea, hi
		lowHash = selectedChild
	}
	if !foundHeaderOnlyBlock {
		if lowHash.Equal(highHash) {
		if lowHash == highHash {
			// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
			// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
			// In these cases - return an empty list of blocks to sync
@@ -153,7 +153,7 @@ func (sm *syncManager) missingBlockBodyHashes(stagingArea *model.StagingArea, hi
		}
		// TODO: Once block children are fixed (https://github.com/kaspanet/kaspad/issues/1499),
		// this error should be returned rather than logged
		log.Errorf("No header-only blocks between %s and %s",
		log.Errorf("no header-only blocks between %s and %s",
			lowHash, highHash)
	}
@@ -4,7 +4,6 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/pkg/errors"
)

type syncManager struct {
@@ -70,21 +69,6 @@ func (sm *syncManager) GetHashesBetween(stagingArea *model.StagingArea, lowHash,
	return sm.antiPastHashesBetween(stagingArea, lowHash, highHash, maxBlocks)
}

func (sm *syncManager) GetAnticone(stagingArea *model.StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) {
	onEnd := logger.LogAndMeasureExecutionTime(log, "GetAnticone")
	defer onEnd()
	isContextAncestorOfBlock, err := sm.dagTopologyManager.IsAncestorOf(stagingArea, contextHash, blockHash)
	if err != nil {
		return nil, err
	}
	if isContextAncestorOfBlock {
		return nil, errors.Errorf("expected block %s to not be in future of %s",
			blockHash,
			contextHash)
	}
	return sm.dagTraversalManager.AnticoneFromBlocks(stagingArea, []*externalapi.DomainHash{contextHash}, blockHash, maxBlocks)
}

func (sm *syncManager) GetMissingBlockBodyHashes(stagingArea *model.StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
	onEnd := logger.LogAndMeasureExecutionTime(log, "GetMissingBlockBodyHashes")
	defer onEnd()
@@ -2,9 +2,6 @@ package consensus

import (
	"encoding/json"
	"fmt"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
	"io"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -111,13 +108,13 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
	return consensushashing.BlockHash(block), virtualChangeSet, nil
}

// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
	ID      string   `json:"id"`
	Parents []string `json:"parents"`
}

func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) {
	// jsonBlock is a json representation of a block in mine format
	type jsonBlock struct {
		ID      string   `json:"id"`
		Parents []string `json:"parents"`
	}

	tipSet := map[externalapi.DomainHash]*externalapi.DomainHash{}
	tipSet[*tc.dagParams.GenesisHash] = tc.dagParams.GenesisHash

@@ -185,64 +182,6 @@ func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockTy
	return tips, nil
}

func (tc *testConsensus) ToJSON(w io.Writer) error {
	hashToID := make(map[externalapi.DomainHash]string)
	lastID := 0

	encoder := json.NewEncoder(w)
	visited := hashset.New()
	queue := tc.dagTraversalManager.NewUpHeap(model.NewStagingArea())
	err := queue.Push(tc.dagParams.GenesisHash)
	if err != nil {
		return err
	}

	blocksToAdd := make([]jsonBlock, 0)
	for queue.Len() > 0 {
		current := queue.Pop()
		if visited.Contains(current) {
			continue
		}

		visited.Add(current)

		if current.Equal(model.VirtualBlockHash) {
			continue
		}

		header, err := tc.blockHeaderStore.BlockHeader(tc.databaseContext, model.NewStagingArea(), current)
		if err != nil {
			return err
		}

		directParents := header.DirectParents()

		parentIDs := make([]string, len(directParents))
		for i, parent := range directParents {
			parentIDs[i] = hashToID[*parent]
		}
		lastIDStr := fmt.Sprintf("%d", lastID)
		blocksToAdd = append(blocksToAdd, jsonBlock{
			ID:      lastIDStr,
			Parents: parentIDs,
		})
		hashToID[*current] = lastIDStr
		lastID++

		children, err := tc.dagTopologyManagers[0].Children(model.NewStagingArea(), current)
		if err != nil {
			return err
		}

		err = queue.PushSlice(children)
		if err != nil {
			return err
		}
	}

	return encoder.Encode(blocksToAdd)
}

func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
	// Require write lock because BuildBlockWithParents stages temporary data
	tc.lock.Lock()
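Both MineJSON and ToJSON above speak the same trivial format: an array of jsonBlock objects, each holding a string ID and the IDs of its direct parents. A minimal sketch of producing that shape for a small diamond DAG (the block values are invented for illustration):

package main

import (
	"encoding/json"
	"log"
	"os"
)

// jsonBlock matches the struct used by MineJSON/ToJSON above.
type jsonBlock struct {
	ID      string   `json:"id"`
	Parents []string `json:"parents"`
}

func main() {
	blocks := []jsonBlock{
		{ID: "0", Parents: []string{}},         // genesis
		{ID: "1", Parents: []string{"0"}},
		{ID: "2", Parents: []string{"0"}},
		{ID: "3", Parents: []string{"1", "2"}}, // merges both tips
	}

	// Produces: [{"id":"0","parents":[]},{"id":"1","parents":["0"]},...]
	if err := json.NewEncoder(os.Stdout).Encode(blocks); err != nil {
		log.Fatal(err)
	}
}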
@@ -135,7 +135,7 @@ func (btb *blockTemplateBuilder) GetBlockTemplate(coinbaseData *consensusexterna

	invalidTxsErr := ruleerrors.ErrInvalidTransactionsInNewBlock{}
	if errors.As(err, &invalidTxsErr) {
		log.Criticalf("consensusReference.Consensus().BuildBlock returned invalid txs in GetBlockTemplate")
		log.Criticalf("consensusReference.Consensus().BuildBlock returned invalid txs in GetBlockTemplate: %s", err)
		invalidTxs := make([]*consensusexternalapi.DomainTransaction, 0, len(invalidTxsErr.InvalidTransactions))
		for _, tx := range invalidTxsErr.InvalidTransactions {
			invalidTxs = append(invalidTxs, tx.Transaction)
@@ -18,7 +18,7 @@ func (tobf *TransactionsOrderedByFeeRate) GetByIndex(index int) *MempoolTransact

// Push inserts a transaction into the set, placing it in the correct place to preserve order
func (tobf *TransactionsOrderedByFeeRate) Push(transaction *MempoolTransaction) error {
	index, _, err := tobf.findTransactionIndex(transaction)
	index, err := tobf.findTransactionIndex(transaction)
	if err != nil {
		return err
	}
@@ -29,21 +29,18 @@ func (tobf *TransactionsOrderedByFeeRate) Push(transaction *MempoolTransaction)
	return nil
}

// ErrTransactionNotFound is returned by tobf.TransactionsOrderedByFeeRate
var ErrTransactionNotFound = errors.New("Couldn't find transaction in mp.orderedTransactionsByFeeRate")

// Remove removes the given transaction from the set.
// Returns an error if the transaction does not exist in the set, or if the given transaction does not have mass
// and fee filled in.
func (tobf *TransactionsOrderedByFeeRate) Remove(transaction *MempoolTransaction) error {
	index, wasFound, err := tobf.findTransactionIndex(transaction)
	index, err := tobf.findTransactionIndex(transaction)
	if err != nil {
		return err
	}

	if !wasFound {
		return errors.Wrapf(ErrTransactionNotFound,
			"Couldn't find %s in mp.orderedTransactionsByFeeRate", transaction.TransactionID())
	txID := transaction.TransactionID()
	if !tobf.slice[index].TransactionID().Equal(txID) {
		return errors.Errorf("Couldn't find %s in mp.orderedTransactionsByFeeRate", txID)
	}

	return tobf.RemoveAtIndex(index)
@@ -59,18 +56,15 @@ func (tobf *TransactionsOrderedByFeeRate) RemoveAtIndex(index int) error {
	return nil
}

// findTransactionIndex finds the given transaction inside the list of transactions ordered by fee rate.
// If the transaction was not found, returns wasFound=false and index=the index at which the transaction can be
// inserted while preserving the order.
func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *MempoolTransaction) (index int, wasFound bool, err error) {
func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *MempoolTransaction) (int, error) {
	if transaction.Transaction().Fee == 0 || transaction.Transaction().Mass == 0 {
		return 0, false, errors.Errorf("findTransactionIndex expects a transaction with " +
		return 0, errors.Errorf("findTxIndexInOrderedTransactionsByFeeRate expects a transaction with " +
			"populated fee and mass")
	}
	txID := transaction.TransactionID()
	txFeeRate := float64(transaction.Transaction().Fee) / float64(transaction.Transaction().Mass)

	index = sort.Search(len(tobf.slice), func(i int) bool {
	return sort.Search(len(tobf.slice), func(i int) bool {
		iElement := tobf.slice[i]
		elementFeeRate := float64(iElement.Transaction().Fee) / float64(iElement.Transaction().Mass)
		if elementFeeRate > txFeeRate {
@@ -82,10 +76,5 @@ func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *Memp
		}

		return false
	})

	wasFound = index != len(tobf.slice) && // sort.Search returns len(tobf.slice) if nothing was found
		tobf.slice[index].TransactionID().Equal(transaction.TransactionID())

	return index, wasFound, nil
	}), nil
}
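The refactor above folds sort.Search's result straight into the return value, which relies on a property worth restating: sort.Search returns the smallest index at which the predicate holds, and that index doubles as the insertion point when the element is absent — which is why the new Remove must compare transaction IDs at the returned index to tell the two cases apart. A minimal sketch over bare fee rates (the tie-breaking by transaction ID present in the real predicate is omitted):

package main

import (
	"fmt"
	"sort"
)

func main() {
	feeRates := []float64{1.0, 2.5, 2.5, 7.0} // kept sorted ascending

	// Smallest index i with feeRates[i] >= target; len(feeRates) if none.
	find := func(target float64) int {
		return sort.Search(len(feeRates), func(i int) bool {
			return feeRates[i] >= target
		})
	}

	fmt.Println(find(2.5)) // 1: present, first element of the equal run
	fmt.Println(find(3.0)) // 3: absent, so 3 is its insertion index
	fmt.Println(find(9.0)) // 4 == len(feeRates): absent and larger than all
}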
@@ -3,8 +3,6 @@ package mempool
import (
	"time"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool/model"
)
@@ -81,12 +79,7 @@ func (tp *transactionsPool) removeTransaction(transaction *model.MempoolTransact

	err := tp.transactionsOrderedByFeeRate.Remove(transaction)
	if err != nil {
		if errors.Is(err, model.ErrTransactionNotFound) {
			log.Errorf("Transaction %s not found in tp.transactionsOrderedByFeeRate. This should never happen but sometimes does",
				transaction.TransactionID())
		} else {
			return err
		}
		return err
	}

	delete(tp.highPriorityTransactions, *transaction.TransactionID())
@@ -52,7 +52,7 @@ const (
	defaultSigCacheMaxSize  = 100000
	sampleConfigFilename    = "sample-kaspad.conf"
	defaultMaxUTXOCacheSize = 5000000000
	defaultProtocolVersion  = 5
	defaultProtocolVersion  = 4
)

var (
File diff suppressed because it is too large
@@ -47,10 +47,6 @@ message KaspadMessage {
		ReadyMessage ready = 50;
		BlockWithTrustedDataV4Message blockWithTrustedDataV4 = 51;
		TrustedDataMessage trustedData = 52;
		RequestIBDChainBlockLocatorMessage requestIBDChainBlockLocator = 53;
		IbdChainBlockLocatorMessage ibdChainBlockLocator = 54;
		RequestAnticoneMessage requestAnticone = 55;
		RequestNextPruningPointAndItsAnticoneBlocksMessage requestNextPruningPointAndItsAnticoneBlocks = 56;

		GetCurrentNetworkRequestMessage getCurrentNetworkRequest = 1001;
		GetCurrentNetworkResponseMessage getCurrentNetworkResponse = 1002;
@@ -11,8 +11,7 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const _ = grpc.SupportPackageIsVersion6

// P2PClient is the client API for P2P service.
//
@@ -30,7 +29,7 @@ func NewP2PClient(cc grpc.ClientConnInterface) P2PClient {
}

func (c *p2PClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (P2P_MessageStreamClient, error) {
	stream, err := c.cc.NewStream(ctx, &P2P_ServiceDesc.Streams[0], "/protowire.P2P/MessageStream", opts...)
	stream, err := c.cc.NewStream(ctx, &_P2P_serviceDesc.Streams[0], "/protowire.P2P/MessageStream", opts...)
	if err != nil {
		return nil, err
	}
@@ -72,20 +71,13 @@ type P2PServer interface {
type UnimplementedP2PServer struct {
}

func (UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
func (*UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
	return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}
func (*UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}

// UnsafeP2PServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to P2PServer will
// result in compilation errors.
type UnsafeP2PServer interface {
	mustEmbedUnimplementedP2PServer()
}

func RegisterP2PServer(s grpc.ServiceRegistrar, srv P2PServer) {
	s.RegisterService(&P2P_ServiceDesc, srv)
func RegisterP2PServer(s *grpc.Server, srv P2PServer) {
	s.RegisterService(&_P2P_serviceDesc, srv)
}

func _P2P_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -114,10 +106,7 @@ func (x *p2PMessageStreamServer) Recv() (*KaspadMessage, error) {
	return m, nil
}

// P2P_ServiceDesc is the grpc.ServiceDesc for P2P service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var P2P_ServiceDesc = grpc.ServiceDesc{
var _P2P_serviceDesc = grpc.ServiceDesc{
	ServiceName: "protowire.P2P",
	HandlerType: (*P2PServer)(nil),
	Methods:     []grpc.MethodDesc{},
@@ -148,7 +137,7 @@ func NewRPCClient(cc grpc.ClientConnInterface) RPCClient {
}

func (c *rPCClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (RPC_MessageStreamClient, error) {
	stream, err := c.cc.NewStream(ctx, &RPC_ServiceDesc.Streams[0], "/protowire.RPC/MessageStream", opts...)
	stream, err := c.cc.NewStream(ctx, &_RPC_serviceDesc.Streams[0], "/protowire.RPC/MessageStream", opts...)
	if err != nil {
		return nil, err
	}
@@ -190,20 +179,13 @@ type RPCServer interface {
type UnimplementedRPCServer struct {
}

func (UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
func (*UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
	return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}
func (*UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}

// UnsafeRPCServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to RPCServer will
// result in compilation errors.
type UnsafeRPCServer interface {
	mustEmbedUnimplementedRPCServer()
}

func RegisterRPCServer(s grpc.ServiceRegistrar, srv RPCServer) {
	s.RegisterService(&RPC_ServiceDesc, srv)
func RegisterRPCServer(s *grpc.Server, srv RPCServer) {
	s.RegisterService(&_RPC_serviceDesc, srv)
}

func _RPC_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -232,10 +214,7 @@ func (x *rPCMessageStreamServer) Recv() (*KaspadMessage, error) {
	return m, nil
}

// RPC_ServiceDesc is the grpc.ServiceDesc for RPC service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var RPC_ServiceDesc = grpc.ServiceDesc{
var _RPC_serviceDesc = grpc.ServiceDesc{
	ServiceName: "protowire.RPC",
	HandlerType: (*RPCServer)(nil),
	Methods:     []grpc.MethodDesc{},
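From the caller's side, both generated variants above register the same way; only the parameter type of RegisterP2PServer differs, and *grpc.Server satisfies the newer grpc.ServiceRegistrar interface anyway. A minimal sketch of standing up the stream service (the listener address is hypothetical, and a real server would pass an implementation that embeds UnimplementedP2PServer and overrides MessageStream):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:16111") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}

	s := grpc.NewServer()
	// With either generated form: protowire.RegisterP2PServer(s, srv).
	// *grpc.Server implements grpc.ServiceRegistrar, so the same call
	// compiles against both the old and the new signature.
	if err := s.Serve(listener); err != nil {
		log.Fatal(err)
	}
}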
File diff suppressed because it is too large
@@ -190,20 +190,6 @@ message IbdBlockLocatorMessage {
	repeated Hash blockLocatorHashes = 2;
}

message RequestIBDChainBlockLocatorMessage{
	Hash lowHash = 1;
	Hash highHash = 2;
}

message IbdChainBlockLocatorMessage {
	repeated Hash blockLocatorHashes = 1;
}

message RequestAnticoneMessage{
	Hash blockHash = 1;
	Hash contextHash = 2;
}

message IbdBlockLocatorHighestHashMessage {
	Hash highestHash = 1;
}
@@ -218,9 +204,6 @@ message BlockHeadersMessage {
message RequestPruningPointAndItsAnticoneMessage {
}

message RequestNextPruningPointAndItsAnticoneBlocksMessage{
}

message BlockWithTrustedDataMessage {
	BlockMessage block = 1;
	uint64 daaScore = 2;
@@ -1,33 +0,0 @@
package protowire

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/pkg/errors"
)

func (x *KaspadMessage_IbdChainBlockLocator) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "KaspadMessage_IbdChainBlockLocator is nil")
	}
	return x.IbdChainBlockLocator.toAppMessage()
}

func (x *IbdChainBlockLocatorMessage) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "IbdChainBlockLocatorMessage is nil")
	}
	blockLocatorHashes, err := protoHashesToDomain(x.BlockLocatorHashes)
	if err != nil {
		return nil, err
	}
	return &appmessage.MsgIBDChainBlockLocator{
		BlockLocatorHashes: blockLocatorHashes,
	}, nil
}

func (x *KaspadMessage_IbdChainBlockLocator) fromAppMessage(message *appmessage.MsgIBDChainBlockLocator) error {
	x.IbdChainBlockLocator = &IbdChainBlockLocatorMessage{
		BlockLocatorHashes: domainHashesToProto(message.BlockLocatorHashes),
	}
	return nil
}
@@ -1,42 +0,0 @@
package protowire

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/pkg/errors"
)

func (x *KaspadMessage_RequestAnticone) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "KaspadMessage_RequestAnticone is nil")
	}
	return x.RequestAnticone.toAppMessage()
}

func (x *RequestAnticoneMessage) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "RequestAnticoneMessage is nil")
	}
	blockHash, err := x.BlockHash.toDomain()
	if err != nil {
		return nil, err
	}

	contextHash, err := x.ContextHash.toDomain()
	if err != nil {
		return nil, err
	}

	return &appmessage.MsgRequestAnticone{
		BlockHash:   blockHash,
		ContextHash: contextHash,
	}, nil
}

func (x *KaspadMessage_RequestAnticone) fromAppMessage(msgRequestPastDiff *appmessage.MsgRequestAnticone) error {
	x.RequestAnticone = &RequestAnticoneMessage{
		BlockHash:   domainHashToProto(msgRequestPastDiff.BlockHash),
		ContextHash: domainHashToProto(msgRequestPastDiff.ContextHash),
	}
	return nil
}
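All of the deleted converters above follow the same pattern: a nil check wrapping the payload, then field-by-field translation, so converting to an app message and back preserves the hashes. A sketch of how such a pair is typically exercised — a hypothetical test, written inside the protowire package since the converters are unexported:

package protowire

import (
	"testing"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func TestRequestAnticoneRoundTrip(t *testing.T) {
	blockHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
	contextHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})
	original := appmessage.NewMsgRequestAnticone(blockHash, contextHash)

	// App message -> wire payload.
	wrapper := new(KaspadMessage_RequestAnticone)
	if err := wrapper.fromAppMessage(original); err != nil {
		t.Fatal(err)
	}

	// Wire payload -> app message.
	converted, err := wrapper.toAppMessage()
	if err != nil {
		t.Fatal(err)
	}

	roundTripped := converted.(*appmessage.MsgRequestAnticone)
	if !roundTripped.BlockHash.Equal(blockHash) || !roundTripped.ContextHash.Equal(contextHash) {
		t.Fatal("hashes changed during the round-trip")
	}
}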
@@ -1,55 +0,0 @@
package protowire

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/pkg/errors"
)

func (x *KaspadMessage_RequestIBDChainBlockLocator) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "KaspadMessage_RequestIBDChainBlockLocator is nil")
	}
	return x.RequestIBDChainBlockLocator.toAppMessage()
}

func (x *RequestIBDChainBlockLocatorMessage) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "RequestIBDChainBlockLocatorMessage is nil")
	}
	var err error
	var highHash, lowHash *externalapi.DomainHash
	if x.HighHash != nil {
		highHash, err = x.HighHash.toDomain()
		if err != nil {
			return nil, err
		}
	}
	if x.LowHash != nil {
		lowHash, err = x.LowHash.toDomain()
		if err != nil {
			return nil, err
		}
	}
	return &appmessage.MsgRequestIBDChainBlockLocator{
		HighHash: highHash,
		LowHash:  lowHash,
	}, nil
}

func (x *KaspadMessage_RequestIBDChainBlockLocator) fromAppMessage(msgGetBlockLocator *appmessage.MsgRequestIBDChainBlockLocator) error {
	var highHash, lowHash *Hash
	if msgGetBlockLocator.HighHash != nil {
		highHash = domainHashToProto(msgGetBlockLocator.HighHash)
	}
	if msgGetBlockLocator.LowHash != nil {
		lowHash = domainHashToProto(msgGetBlockLocator.LowHash)
	}
	x.RequestIBDChainBlockLocator = &RequestIBDChainBlockLocatorMessage{
		HighHash: highHash,
		LowHash:  lowHash,
	}

	return nil
}
@@ -1,17 +0,0 @@
package protowire

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/pkg/errors"
)

func (x *KaspadMessage_RequestNextPruningPointAndItsAnticoneBlocks) toAppMessage() (appmessage.Message, error) {
	if x == nil {
		return nil, errors.Wrapf(errorNil, "KaspadMessage_RequestNextPruningPointAndItsAnticoneBlocks is nil")
	}
	return &appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks{}, nil
}

func (x *KaspadMessage_RequestNextPruningPointAndItsAnticoneBlocks) fromAppMessage(_ *appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks) error {
	return nil
}
@@ -10,13 +10,14 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.26.0
// 	protoc-gen-go v1.25.0
// 	protoc        v3.12.3
// source: rpc.proto

package protowire

import (
	proto "github.com/golang/protobuf/proto"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
@@ -30,6 +31,10 @@ const (
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

type SubmitBlockResponseMessage_RejectReason int32

const (
|
||||
return nil, err
|
||||
}
|
||||
return payload, nil
|
||||
case *appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks:
|
||||
payload := new(KaspadMessage_RequestNextPruningPointAndItsAnticoneBlocks)
|
||||
err := payload.fromAppMessage(message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return payload, nil
|
||||
case *appmessage.MsgRequestIBDChainBlockLocator:
|
||||
payload := new(KaspadMessage_RequestIBDChainBlockLocator)
|
||||
err := payload.fromAppMessage(message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return payload, nil
|
||||
case *appmessage.MsgIBDChainBlockLocator:
|
||||
payload := new(KaspadMessage_IbdChainBlockLocator)
|
||||
err := payload.fromAppMessage(message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return payload, nil
|
||||
case *appmessage.MsgRequestAnticone:
|
||||
payload := new(KaspadMessage_RequestAnticone)
|
||||
err := payload.fromAppMessage(message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return payload, nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
Binary file not shown.
@@ -1,7 +1 @@
{
	"skipProofOfWork": true,
	"mergeSetSizeLimit": 30,
	"finalityDuration": 200000,
	"hardForkOmitGenesisFromParentsDaaScore": 2505,
	"k": 0
}
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}
@@ -1,148 +0,0 @@
package fast_pruning_ibd_test

import (
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/domain/dagconfig"
	"io/ioutil"
	"os"
	"path"
	"testing"
	"time"
)

// TestGenerateFastPruningIBDTest generates the json needed for dag-for-fast-pruning-ibd-test.json.gz
func TestGenerateFastPruningIBDTest(t *testing.T) {
	t.Skip()
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		if consensusConfig.Name != dagconfig.DevnetParams.Name {
			return
		}

		factory := consensus.NewFactory()

		// This is done to reduce the pruning depth to 6 blocks
		finalityDepth := 200
		consensusConfig.FinalityDuration = time.Duration(finalityDepth) * consensusConfig.TargetTimePerBlock
		consensusConfig.K = 0
		consensusConfig.PruningProofM = 1
		consensusConfig.MergeSetSizeLimit = 30

		tc, teardownSyncer, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncer")
		if err != nil {
			t.Fatalf("Error setting up tc: %+v", err)
		}
		defer teardownSyncer(false)

		numBlocks := finalityDepth
		tipHash := consensusConfig.GenesisHash
		for i := 0; i < numBlocks; i++ {
			tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
			if err != nil {
				t.Fatal(err)
			}
		}

		tip, err := tc.GetBlock(tipHash)
		if err != nil {
			t.Fatal(err)
		}

		header := tip.Header.ToMutable()

		for i := uint64(1); i < 1000; i++ {
			if i%100 == 0 {
				t.Logf("Added %d tips", i)
			}
			header.SetNonce(tip.Header.Nonce() + i)
			block := &externalapi.DomainBlock{Header: header.ToImmutable(), Transactions: tip.Transactions}
			_, err = tc.ValidateAndInsertBlock(block, true)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}
		}

		emptyCoinbase := &externalapi.DomainCoinbaseData{
			ScriptPublicKey: &externalapi.ScriptPublicKey{
				Script:  nil,
				Version: 0,
			},
		}

		pruningPoint, err := tc.PruningPoint()
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; ; i++ {
			currentPruningPoint, err := tc.PruningPoint()
			if err != nil {
				t.Fatal(err)
			}

			if !pruningPoint.Equal(currentPruningPoint) {
				t.Fatalf("Pruning point unexpectedly changed")
			}

			tips, err := tc.Tips()
			if err != nil {
				t.Fatal(err)
			}

			if len(tips) == 1 {
				break
			}

			if i%10 == 0 {
				t.Logf("Number of tips: %d", len(tips))
			}

			block, err := tc.BuildBlock(emptyCoinbase, nil)
			if err != nil {
				t.Fatal(err)
			}

			_, err = tc.ValidateAndInsertBlock(block, true)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}
		}

		for {
			currentPruningPoint, err := tc.PruningPoint()
			if err != nil {
				t.Fatal(err)
			}

			if !pruningPoint.Equal(currentPruningPoint) {
				break
			}

			block, err := tc.BuildBlock(emptyCoinbase, nil)
			if err != nil {
				t.Fatal(err)
			}

			_, err = tc.ValidateAndInsertBlock(block, true)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}
		}

		file, err := ioutil.TempFile("", "")
		if err != nil {
			t.Fatal(err)
		}

		err = tc.ToJSON(file)
		if err != nil {
			t.Fatal(err)
		}
		stat, err := file.Stat()
		if err != nil {
			t.Fatal(err)
		}
		t.Logf("DAG saved at %s", path.Join(os.TempDir(), stat.Name()))
	})
}
@@ -37,7 +37,7 @@ func mineLoop(syncerRPCClient, syncedRPCClient *rpc.Client) error {
	}

	start := time.Now()
	const timeToPropagate = 60 * time.Second
	const timeToPropagate = 30 * time.Second
	select {
	case <-syncedRPCClient.OnBlockAdded:
	case <-time.After(timeToPropagate):
@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
	appMajor uint = 0
	appMinor uint = 11
	appPatch uint = 14
	appPatch uint = 12
)

// appBuild is defined as a variable so it can be overridden during the build