Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-03-30 15:08:33 +00:00

* Pruning headers p2p basic structure
* Remove headers-first
* Fix consensus tests except TestValidateAndInsertPruningPointWithSideBlocks and TestValidateAndInsertImportedPruningPoint
* Add virtual genesis
* Implement PruningPointAndItsAnticoneWithMetaData
* Start fixing TestValidateAndInsertImportedPruningPoint
* Fix TestValidateAndInsertImportedPruningPoint
* Fix BlockWindow
* Update p2p and gRPC
* Fix all tests except TestHandleRelayInvs
* Delete TestHandleRelayInvs parts that cover the old IBD flow
* Fix lint errors
* Add p2p_request_ibd_blocks.go
* Clean code
* Make MsgBlockWithMetaData implement its own representation
* Remove redundant check if highest share block is below the pruning point
* Fix TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime
* Fix comments, errors and names
* Fix window size to the real value
* Check reindex root after each block at TestUpdateReindexRoot
* Remove irrelevant check
* Renames and comments
* Remove redundant argument from sendGetBlockLocator
* Don't delete staging on non-recoverable errors
* Renames and comments
* Remove redundant code
* Commit changes inside ResolveVirtual
* Add comment to IsRecoverableError
* Remove blocksWithMetaDataGHOSTDAGDataStore
* Increase windows pagefile
* Move DeleteStagingConsensus outside of defer
* Get rid of mustAccepted in receiveBlockWithMetaData
* Ban on invalid pruning point
* Rename interface_datastructures_daawindowstore.go to interface_datastructures_blocks_with_meta_data_daa_window_store.go
* Change GetVirtualSelectedParentChainFromBlockResponseMessage and VirtualSelectedParentChainChangedNotificationMessage to show only added block hashes
* Remove ResolveVirtual
* Use externalapi.ConsensusWrapper inside MiningManager
* Fix pruningmanager.blockwithmetadata
* Set pruning point selected child when importing the pruning point UTXO set
* Change virtual genesis hash
* Replace the selected parent with virtual genesis on removePrunedBlocksFromGHOSTDAGData
* Get rid of low hash in block locators
* Remove +1 from everywhere we use difficultyAdjustmentWindowSize and increase the default value by one
* Add comments about consensus wrapper
* Don't use separate staging area when resolving resolveBlockStatus
* Fix netsync stability test
* Fix checkResolveVirtual
* Rename ConsensusWrapper->ConsensusReference
* Get rid of blockHeapNode
* Add comment to defaultDifficultyAdjustmentWindowSize
* Add SelectedChild to DAGTraversalManager
* Remove redundant copy
* Rename blockWindowHeap->calculateBlockWindowHeap
* Move isVirtualGenesisOnlyParent to utils
* Change BlockWithMetaData->BlockWithTrustedData
* Get rid of maxReasonLength
* Split IBD to 100 blocks each time
* Fix a bug in calculateBlockWindowHeap
* Switch to trusted data when encountering virtual genesis in blockWithTrustedData
* Move ConsensusReference to domain
* Update ConsensusReference comment
* Add comment
* Rename shouldNotAddGenesis->skipAddingGenesis
170 lines
5.4 KiB
Go
package flowcontext

import (
	"time"

	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
)

// OnNewBlock updates the mempool after a new block arrival, relays newly
// unorphaned transactions, and possibly rebroadcasts manually added
// transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
	blockInsertionResult *externalapi.BlockInsertionResult) error {

	hash := consensushashing.BlockHash(block)
	log.Debugf("OnNewBlock start for block %s", hash)
	defer log.Debugf("OnNewBlock end for block %s", hash)

	unorphaningResults, err := f.UnorphanBlocks(block)
	if err != nil {
		return err
	}

	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))

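	// Collect the new block together with every block it unorphaned, keeping each
	// block paired (by index) with its own insertion result, so the mining manager
	// and the onBlockAddedToDAGHandler below process them in the same order.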
	newBlocks := []*externalapi.DomainBlock{block}
	newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
	for _, unorphaningResult := range unorphaningResults {
		newBlocks = append(newBlocks, unorphaningResult.block)
		newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
	}

	allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
	for i, newBlock := range newBlocks {
		newBlockHash := consensushashing.BlockHash(newBlock)
		log.Debugf("OnNewBlock: passing block %s transactions to mining manager", newBlockHash)
		acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
		if err != nil {
			return err
		}
		allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)

		if f.onBlockAddedToDAGHandler != nil {
			log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", newBlockHash)
			blockInsertionResult = newBlockInsertionResults[i]
			err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
			if err != nil {
				return err
			}
		}
	}

	return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
}

// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to a pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
	if f.onPruningPointUTXOSetOverrideHandler != nil {
		return f.onPruningPointUTXOSetOverrideHandler()
	}
	return nil
}

func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
	addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {

	// Don't relay transactions when in IBD.
	if f.IsIBDRunning() {
		return nil
	}

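	// Periodically (as decided by shouldRebroadcastTransactions) revalidate the
	// high-priority (manually added) transactions still in the mempool and collect
	// the ones that survive revalidation for rebroadcast.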
	var txIDsToRebroadcast []*externalapi.DomainTransactionID
	if f.shouldRebroadcastTransactions() {
		txsToRebroadcast, err := f.Domain().MiningManager().RevalidateHighPriorityTransactions()
		if err != nil {
			return err
		}
		txIDsToRebroadcast = consensushashing.TransactionIDs(txsToRebroadcast)
		f.lastRebroadcastTime = time.Now()
	}

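	// Announce the newly accepted transactions and the rebroadcast candidates as a
	// single batch of transaction IDs: accepted IDs first, rebroadcast IDs appended
	// after them.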
	txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
	for i, tx := range transactionsAcceptedToMempool {
		txIDsToBroadcast[i] = consensushashing.TransactionID(tx)
	}
	offset := len(transactionsAcceptedToMempool)
	for i, txID := range txIDsToRebroadcast {
		txIDsToBroadcast[offset+i] = txID
	}
	return f.EnqueueTransactionIDsForPropagation(txIDsToBroadcast)
}

// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
	return f.sharedRequestedBlocks
}

// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(false, "cannot add header only block")
	}

	blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
	if err != nil {
		if errors.As(err, &ruleerrors.RuleError{}) {
			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
		}
		return err
	}
	err = f.OnNewBlock(block, blockInsertionResult)
	if err != nil {
		return err
	}
	return f.Broadcast(appmessage.NewMsgInvBlock(consensushashing.BlockHash(block)))
}
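
// Illustrative note (not part of the original file): a component that builds a block
// locally - for example an RPC block-submission handler - is expected to hand it to
// AddBlock, which validates and inserts it, runs the OnNewBlock mempool/notification
// logic above, and finally announces the block's inventory to peers, e.g.:
//
//	if err := f.AddBlock(block); err != nil {
//		// Rule errors are already logged by AddBlock; the caller decides how to report them.
//		return err
//	}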

// IsIBDRunning returns true if IBD is currently marked as running
func (f *FlowContext) IsIBDRunning() bool {
	f.ibdPeerMutex.RLock()
	defer f.ibdPeerMutex.RUnlock()

	return f.ibdPeer != nil
}

// TrySetIBDRunning attempts to set `isInIBD`. Returns false
// if it is already set
func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
	f.ibdPeerMutex.Lock()
	defer f.ibdPeerMutex.Unlock()

	if f.ibdPeer != nil {
		return false
	}
	f.ibdPeer = ibdPeer
	log.Infof("IBD started")

	return true
}

// UnsetIBDRunning unsets isInIBD
func (f *FlowContext) UnsetIBDRunning() {
	f.ibdPeerMutex.Lock()
	defer f.ibdPeerMutex.Unlock()

	if f.ibdPeer == nil {
		panic("attempted to unset isInIBD when it was not set to begin with")
	}

	f.ibdPeer = nil
}
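
// Illustrative note (not part of the original file): TrySetIBDRunning and
// UnsetIBDRunning are meant to bracket a single IBD session, so an IBD flow would
// typically claim the slot and release it when done, e.g.:
//
//	if !f.TrySetIBDRunning(peer) {
//		return nil // another peer is already serving IBD
//	}
//	defer f.UnsetIBDRunning()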

// IBDPeer returns the current IBD peer or nil if the node is not
// in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
	f.ibdPeerMutex.RLock()
	defer f.ibdPeerMutex.RUnlock()

	return f.ibdPeer
}