Compare commits


2 Commits

Author      SHA1        Message           Date
Ori Newman  ef1a3c0dce  remove debug log  2022-03-02 13:09:34 +02:00
Ori Newman  1cedc720ac  patch             2022-03-02 12:31:14 +02:00
366 changed files with 4230 additions and 14210 deletions

View File

@@ -19,11 +19,16 @@ jobs:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
# Increase the pagefile size on Windows to avoid running out of memory
- name: Increase pagefile size on Windows
if: runner.os == 'Windows'
run: powershell -command .github\workflows\SetPageFileSize.ps1
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
- name: Build on Linux
if: runner.os == 'Linux'

View File

@@ -22,7 +22,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
- name: Set scheduled branch name
shell: bash

View File

@@ -33,7 +33,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -58,7 +58,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
- name: Checkout
uses: actions/checkout@v2
@@ -86,7 +86,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
- name: Delete the stability tests from coverage
run: rm -r stability-tests

View File

@@ -7,13 +7,15 @@ Kaspad
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in Beta state.
## What is kaspa
Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations and sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a generalization of Nakamoto consensus.
## Requirements
Go 1.18 or later.
Go 1.16 or later.
## Installation

View File

@@ -69,10 +69,6 @@ const (
CmdReady
CmdTrustedData
CmdBlockWithTrustedDataV4
CmdRequestNextPruningPointAndItsAnticoneBlocks
CmdRequestIBDChainBlockLocator
CmdIBDChainBlockLocator
CmdRequestAnticone
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -156,13 +152,6 @@ const (
CmdVirtualDaaScoreChangedNotificationMessage
CmdGetBalancesByAddressesRequestMessage
CmdGetBalancesByAddressesResponseMessage
CmdNotifyNewBlockTemplateRequestMessage
CmdNotifyNewBlockTemplateResponseMessage
CmdNewBlockTemplateNotificationMessage
CmdGetMempoolEntriesByAddressesRequestMessage
CmdGetMempoolEntriesByAddressesResponseMessage
CmdGetCoinSupplyRequestMessage
CmdGetCoinSupplyResponseMessage
)
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -206,10 +195,6 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdReady: "Ready",
CmdTrustedData: "TrustedData",
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator",
CmdIBDChainBlockLocator: "IBDChainBlockLocator",
CmdRequestAnticone: "RequestAnticone",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -293,13 +278,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest",
CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
CmdNotifyNewBlockTemplateRequestMessage: "NotifyNewBlockTemplateRequest",
CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse",
CmdNewBlockTemplateNotificationMessage: "NewBlockTemplateNotification",
CmdGetMempoolEntriesByAddressesRequestMessage: "GetMempoolEntriesByAddressesRequest",
CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
CmdGetCoinSupplyRequestMessage: "GetCoinSupplyRequest",
CmdGetCoinSupplyResponseMessage: "GetCoinSupplyResponse",
}
// Message is an interface that describes a kaspa message. A type that

View File

@@ -1,27 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgIBDChainBlockLocator struct {
baseMessage
BlockLocatorHashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
return CmdIBDChainBlockLocator
}
// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
return &MsgIBDChainBlockLocator{
BlockLocatorHashes: locatorHashes,
}
}
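
For orientation, here is a minimal sketch of how a receiver of MsgIBDChainBlockLocator might scan the locator for the highest block it already knows, using the Consensus.GetBlockInfo call that appears elsewhere in this diff. findHighestKnownHash is a hypothetical helper, not part of this change:

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// findHighestKnownHash returns the first locator hash that exists locally.
// Locator hashes are ordered from the syncer's tip downward, so the first
// hit is the highest block shared with the syncer. Hypothetical helper.
func findHighestKnownHash(consensus externalapi.Consensus,
	msg *appmessage.MsgIBDChainBlockLocator) (*externalapi.DomainHash, bool, error) {

	for _, hash := range msg.BlockLocatorHashes {
		info, err := consensus.GetBlockInfo(hash)
		if err != nil {
			return nil, false, err
		}
		if info.Exists {
			return hash, true, nil
		}
	}
	return nil, false, nil
}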

View File

@@ -1,33 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestAnticone implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
type MsgRequestAnticone struct {
baseMessage
BlockHash *externalapi.DomainHash
ContextHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAnticone) Command() MessageCommand {
return CmdRequestAnticone
}
// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
return &MsgRequestAnticone{
BlockHash: blockHash,
ContextHash: contextHash,
}
}
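
As a usage sketch (not part of this change), a requesting flow could enqueue this message on its outgoing route the way the other flows in this diff do; requestAnticone is a hypothetical helper:

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// requestAnticone asks a peer for the set past(contextHash) ∩ anticone(blockHash).
// The real flows wrap a send like this with timeouts and batching.
func requestAnticone(outgoingRoute *router.Route,
	blockHash, contextHash *externalapi.DomainHash) error {

	return outgoingRoute.Enqueue(appmessage.NewMsgRequestAnticone(blockHash, contextHash))
}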

View File

@@ -1,31 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
type MsgRequestIBDChainBlockLocator struct {
baseMessage
HighHash *externalapi.DomainHash
LowHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
return CmdRequestIBDChainBlockLocator
}
// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
return &MsgRequestIBDChainBlockLocator{
HighHash: highHash,
LowHash: lowHash,
}
}
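
A rough round-trip sketch over the router routes used by the flows in this diff (hypothetical helper; the real negotiation flow further down adds timeouts and a zoom-in loop):

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// getChainBlockLocator requests a locator between lowHash and highHash and
// waits for the MsgIBDChainBlockLocator reply.
func getChainBlockLocator(incomingRoute, outgoingRoute *router.Route,
	highHash, lowHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {

	err := outgoingRoute.Enqueue(appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash))
	if err != nil {
		return nil, err
	}
	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}
	locator, ok := message.(*appmessage.MsgIBDChainBlockLocator)
	if !ok {
		return nil, protocolerrors.Errorf(true, "unexpected message %s", message.Command())
	}
	return locator.BlockLocatorHashes, nil
}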

View File

@@ -1,22 +0,0 @@
package appmessage
// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
// more blocks from the pruning anticone.
//
// This message has no payload.
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
return CmdRequestNextPruningPointAndItsAnticoneBlocks
}
// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
}
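
The counterpart appears in the pruning-point flow below, where the syncer waits for this message after every ibdBatchSize blocks. A syncee-side sketch (hypothetical helper, assuming the ibdBatchSize constant from this diff):

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// requestNextAnticoneBatchIfNeeded signals the syncer to send the next batch
// once a full batch of blocks-with-trusted-data has been processed.
func requestNextAnticoneBatchIfNeeded(outgoingRoute *router.Route, processed int) error {
	if processed%ibdBatchSize != 0 { // ibdBatchSize as defined in this diff
		return nil
	}
	return outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
}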

View File

@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
// TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
wantTxID1, err := transactionid.FromString(txID1Str)
if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err)
@@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
}
hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)

View File

@@ -5,7 +5,6 @@ package appmessage
type GetBlockTemplateRequestMessage struct {
baseMessage
PayAddress string
ExtraData string
}
// Command returns the protocol command string for the message
@@ -14,10 +13,9 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
}
// NewGetBlockTemplateRequestMessage returns an instance of the message
func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
return &GetBlockTemplateRequestMessage{
PayAddress: payAddress,
ExtraData: extraData,
}
}

View File

@@ -1,40 +0,0 @@
package appmessage
// GetCoinSupplyRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
return CmdGetCoinSupplyRequestMessage
}
// NewGetCoinSupplyRequestMessage returns an instance of the message
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
return &GetCoinSupplyRequestMessage{}
}
// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyResponseMessage struct {
baseMessage
MaxSompi uint64
CirculatingSompi uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
return CmdGetCoinSupplyResponseMessage
}
// NewGetCoinSupplyResponseMessage returns an instance of the message
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
return &GetCoinSupplyResponseMessage{
MaxSompi: maxSompi,
CirculatingSompi: circulatingSompi,
}
}
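
A small client-side sketch of what the response enables (illustrative only; sompi is the base unit implied by the field names):

// percentMined computes how much of the maximum supply is circulating.
func percentMined(resp *appmessage.GetCoinSupplyResponseMessage) float64 {
	if resp.MaxSompi == 0 {
		return 0
	}
	return 100 * float64(resp.CirculatingSompi) / float64(resp.MaxSompi)
}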

View File

@@ -23,8 +23,6 @@ type GetInfoResponseMessage struct {
P2PID string
MempoolSize uint64
ServerVersion string
IsUtxoIndexed bool
IsSynced bool
Error *RPCError
}
@@ -35,12 +33,10 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
}
// NewGetInfoResponseMessage returns an instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
return &GetInfoResponseMessage{
P2PID: p2pID,
MempoolSize: mempoolSize,
ServerVersion: serverVersion,
IsUtxoIndexed: isUtxoIndexed,
IsSynced: isSynced,
}
}

View File

@@ -4,8 +4,6 @@ package appmessage
// its respective RPC message
type GetMempoolEntriesRequestMessage struct {
baseMessage
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
@@ -14,11 +12,8 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
}
// NewGetMempoolEntriesRequestMessage returns an instance of the message
func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
return &GetMempoolEntriesRequestMessage{
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
return &GetMempoolEntriesRequestMessage{}
}
// GetMempoolEntriesResponseMessage is an appmessage corresponding to

View File

@@ -1,52 +0,0 @@
package appmessage
// MempoolEntryByAddress represents MempoolEntries associated with some address
type MempoolEntryByAddress struct {
Address string
Receiving []*MempoolEntry
Sending []*MempoolEntry
}
// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesRequestMessage struct {
baseMessage
Addresses []string
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
return CmdGetMempoolEntriesByAddressesRequestMessage
}
// NewGetMempoolEntriesByAddressesRequestMessage returns an instance of the message
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
return &GetMempoolEntriesByAddressesRequestMessage{
Addresses: addresses,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
}
// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesResponseMessage struct {
baseMessage
Entries []*MempoolEntryByAddress
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
return CmdGetMempoolEntriesByAddressesResponseMessage
}
// NewGetMempoolEntriesByAddressesResponseMessage returns an instance of the message
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
return &GetMempoolEntriesByAddressesResponseMessage{
Entries: entries,
}
}
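
An illustrative consumer of the response (hypothetical helper): summing mempool fees per address across both directions:

// totalFeesByAddress sums entry fees over the receiving and sending sides
// of each address in the response.
func totalFeesByAddress(resp *appmessage.GetMempoolEntriesByAddressesResponseMessage) map[string]uint64 {
	totals := make(map[string]uint64)
	for _, byAddress := range resp.Entries {
		for _, entry := range byAddress.Receiving {
			totals[byAddress.Address] += entry.Fee
		}
		for _, entry := range byAddress.Sending {
			totals[byAddress.Address] += entry.Fee
		}
	}
	return totals
}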

View File

@@ -4,9 +4,7 @@ package appmessage
// its respective RPC message
type GetMempoolEntryRequestMessage struct {
baseMessage
TxID string
IncludeOrphanPool bool
FilterTransactionPool bool
TxID string
}
// Command returns the protocol command string for the message
@@ -15,12 +13,8 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
}
// NewGetMempoolEntryRequestMessage returns an instance of the message
func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
return &GetMempoolEntryRequestMessage{
TxID: txID,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
return &GetMempoolEntryRequestMessage{TxID: txID}
}
// GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -36,7 +30,6 @@ type GetMempoolEntryResponseMessage struct {
type MempoolEntry struct {
Fee uint64
Transaction *RPCTransaction
IsOrphan bool
}
// Command returns the protocol command string for the message
@@ -45,12 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
}
// NewGetMempoolEntryResponseMessage returns an instance of the message
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
return &GetMempoolEntryResponseMessage{
Entry: &MempoolEntry{
Fee: fee,
Transaction: transaction,
IsOrphan: isOrphan,
},
}
}

View File

@@ -4,8 +4,7 @@ package appmessage
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
baseMessage
StartHash string
IncludeAcceptedTransactionIDs bool
StartHash string
}
// Command returns the protocol command string for the message
@@ -14,29 +13,18 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
}
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
StartHash: startHash,
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
StartHash: startHash,
}
}
// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
// VirtualSelectedParentChainChangedNotificationMessage appmessages
type AcceptedTransactionIDs struct {
AcceptingBlockHash string
AcceptedTransactionIDs []string
}
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlockHashes []string
AcceptedTransactionIDs []*AcceptedTransactionIDs
Error *RPCError
}
@@ -48,11 +36,10 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlockHashes,
AcceptedTransactionIDs: acceptedTransactionIDs,
}
}
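
For context, a sketch of how a client might flatten the per-block acceptance data this field carries (hypothetical helper):

// collectAcceptedIDs gathers all accepted transaction IDs across the added
// chain blocks of a response.
func collectAcceptedIDs(resp *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage) []string {
	var ids []string
	for _, accepted := range resp.AcceptedTransactionIDs {
		ids = append(ids, accepted.AcceptedTransactionIDs...)
	}
	return ids
}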

View File

@@ -1,50 +0,0 @@
package appmessage
// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
return CmdNotifyNewBlockTemplateRequestMessage
}
// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
return &NotifyNewBlockTemplateRequestMessage{}
}
// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
return CmdNotifyNewBlockTemplateResponseMessage
}
// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
return &NotifyNewBlockTemplateResponseMessage{}
}
// NewBlockTemplateNotificationMessage is an appmessage corresponding to
// its respective RPC message
type NewBlockTemplateNotificationMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
return CmdNewBlockTemplateNotificationMessage
}
// NewNewBlockTemplateNotificationMessage returns an instance of the message
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
return &NewBlockTemplateNotificationMessage{}
}
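
These three types follow the usual RPC notification pattern: register once, receive an acknowledgement, then receive pushes. A hypothetical client-side sketch over a router route (the real RPC client wraps this differently):

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// listenForNewTemplates registers for new-block-template notifications and
// reacts to each push.
func listenForNewTemplates(route *router.Route) error {
	if err := route.Enqueue(appmessage.NewNotifyNewBlockTemplateRequestMessage()); err != nil {
		return err
	}
	for {
		message, err := route.Dequeue()
		if err != nil {
			return err
		}
		switch message.(type) {
		case *appmessage.NotifyNewBlockTemplateResponseMessage:
			// registration acknowledged
		case *appmessage.NewBlockTemplateNotificationMessage:
			// a fresh template is available; rebuild the mining job
		}
	}
}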

View File

@@ -4,7 +4,6 @@ package appmessage
// its respective RPC message
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
baseMessage
IncludeAcceptedTransactionIDs bool
}
// Command returns the protocol command string for the message
@@ -12,13 +11,9 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
}
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
return &NotifyVirtualSelectedParentChainChangedRequestMessage{
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
}
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
}
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@@ -44,7 +39,6 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlockHashes []string
AcceptedTransactionIDs []*AcceptedTransactionIDs
}
// Command returns the protocol command string for the message
@@ -54,11 +48,10 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
// NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlocks,
AcceptedTransactionIDs: acceptedTransactionIDs,
}
}

View File

@@ -4,8 +4,6 @@ import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/app/protocol"
@@ -69,7 +67,6 @@ func (a *ComponentManager) Stop() {
}
a.protocolManager.Close()
close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
return
}
@@ -121,7 +118,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
if err != nil {
return nil, err
}
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
return &ComponentManager{
cfg: cfg,
@@ -142,7 +139,6 @@ func setupRPC(
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex,
consensusEventsChan chan externalapi.ConsensusEvent,
shutDownChan chan<- struct{},
) *rpc.Manager {
@@ -154,10 +150,10 @@ func setupRPC(
connectionManager,
addressManager,
utxoIndex,
consensusEventsChan,
shutDownChan,
)
protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
return rpcManager

View File

@@ -16,42 +16,53 @@ import (
// OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcasts
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
virtualChangeSet *externalapi.VirtualChangeSet) error {
hash := consensushashing.BlockHash(block)
log.Tracef("OnNewBlock start for block %s", hash)
defer log.Tracef("OnNewBlock end for block %s", hash)
log.Debugf("OnNewBlock start for block %s", hash)
defer log.Debugf("OnNewBlock end for block %s", hash)
unorphanedBlocks, err := f.UnorphanBlocks(block)
unorphaningResults, err := f.UnorphanBlocks(block)
if err != nil {
return err
}
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
newBlocks := []*externalapi.DomainBlock{block}
newBlocks = append(newBlocks, unorphanedBlocks...)
newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
}
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
for _, newBlock := range newBlocks {
for i, newBlock := range newBlocks {
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil {
return err
}
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
virtualChangeSet = newVirtualChangeSets[i]
err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
if err != nil {
return err
}
}
}
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
}
// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
func (f *FlowContext) OnNewBlockTemplate() error {
// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
// state consistent without dependency on external event registration
f.Domain().MiningManager().ClearBlockTemplate()
if f.onNewBlockTemplateHandler != nil {
return f.onNewBlockTemplateHandler()
// OnVirtualChange calls the handler function whenever the virtual block changes.
func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
return f.onVirtualChangeHandler(virtualChangeSet)
}
return nil
@@ -107,18 +118,14 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
return protocolerrors.Errorf(false, "cannot add header only block")
}
err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
}
return err
}
err = f.OnNewBlockTemplate()
if err != nil {
return err
}
err = f.OnNewBlock(block)
err = f.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
@@ -143,7 +150,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
return false
}
f.ibdPeer = ibdPeer
log.Infof("IBD started with peer %s", ibdPeer)
log.Infof("IBD started")
return true
}

View File

@@ -2,7 +2,6 @@ package flowcontext
import (
"errors"
"strings"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -10,11 +9,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
)
var (
// ErrPingTimeout signifies that a ping operation timed out.
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
)
// HandleError handles an error from a flow,
// It sends the error to errChan if isStopping == 0 and increments isStopping
//
@@ -27,15 +21,8 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}
if errors.Is(err, ErrPingTimeout) {
// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
log.Errorf("error from %s: %s", flowName, err)
} else {
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
logFrame := strings.Repeat("=", 52)
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
flowName, logFrame, err, logFrame)
}
log.Errorf("error from %s: %s", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -18,8 +18,12 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
)
// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
type OnNewBlockTemplateHandler func() error
// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
// OnPruningPointUTXOSetOverrideHandler is a handler function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.
@@ -40,7 +44,8 @@ type FlowContext struct {
timeStarted int64
onNewBlockTemplateHandler OnNewBlockTemplateHandler
onVirtualChangeHandler OnVirtualChangeHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
@@ -97,14 +102,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan
}
// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
func (f *FlowContext) IsNearlySynced() (bool, error) {
return f.Domain().Consensus().IsNearlySynced()
// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
f.onVirtualChangeHandler = onVirtualChangeHandler
}
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
}
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler

View File

@@ -72,10 +72,3 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
}
return peers
}
// HasPeers returns whether there are currently active peers
func (f *FlowContext) HasPeers() bool {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
return len(f.peers) > 0
}

View File

@@ -15,6 +15,12 @@ import (
// on: 2^orphanResolutionRange * PHANTOM K.
const maxOrphans = 600
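// For scale: with orphanResolutionRange = 5 (set in the relay flow below)
// and, assuming mainnet's PHANTOM K of 18, the bound works out to
// 2^5 * 18 = 576, which 600 covers with headroom.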
// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
virtualChangeSet *externalapi.VirtualChangeSet
}
// AddOrphan adds the block to the orphan set
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
f.orphansMutex.Lock()
@@ -51,7 +57,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
}
// UnorphanBlocks removes the block from the orphan set, and removes all of the blocks that are no longer orphans.
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()
@@ -60,7 +66,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
rootBlockHash := consensushashing.BlockHash(rootBlock)
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
var unorphanedBlocks []*externalapi.DomainBlock
var unorphaningResults []*UnorphaningResult
for len(processQueue) > 0 {
var orphanHash externalapi.DomainHash
orphanHash, processQueue = processQueue[0], processQueue[1:]
@@ -84,18 +90,21 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
}
}
if canBeUnorphaned {
unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
virtualChangeSet: virtualChangeSet,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
}
}
return unorphanedBlocks, nil
return unorphaningResults, nil
}
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
@@ -134,24 +143,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
return childOrphans
}
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
return false, nil
return nil, false, nil
}
return false, err
return nil, false, err
}
log.Infof("Unorphaned block %s", orphanHash)
return true, nil
return virtualChangeSet, true, nil
}
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan

View File

@@ -0,0 +1,47 @@
package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use a block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}
if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
return false, nil
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}
now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}
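
A sketch of how a template consumer might gate on this (hypothetical helper; the flowcontext import path is assumed from the files in this diff):

import (
	"time"

	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
)

// waitUntilMiningAllowed polls ShouldMine until the node looks connected and
// synced, then lets the caller start requesting block templates.
func waitUntilMiningAllowed(f *flowcontext.FlowContext) error {
	for {
		ok, err := f.ShouldMine()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
}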

View File

@@ -18,9 +18,9 @@ var (
// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
minAcceptableProtocolVersion = uint32(5)
minAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(5)
maxAcceptableProtocolVersion = uint32(4)
)
type receiveVersionFlow struct {

View File

@@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:

View File

@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -33,7 +34,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.HasHeader() {
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}
@@ -46,7 +47,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
}
// The IBD block locator is checking only existing blocks with bodies.
if !blockInfo.HasBody() {
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -27,15 +28,18 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
if !found {
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block)

View File

@@ -118,33 +118,16 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
return err
}
for i, blockHash := range pointAndItsAnticone {
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
for _, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}
if !found {
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
if (i+1)%ibdBatchSize == 0 {
// No timeout here, as we don't care if the syncee takes its time computing,
// since it only blocks this dedicated flow
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
}
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())

View File

@@ -5,6 +5,7 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -28,15 +29,18 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
if !found {
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))

View File

@@ -7,11 +7,9 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
@@ -26,8 +24,8 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error
OnNewBlockTemplate() error
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error
@@ -36,19 +34,13 @@ type RelayInvsContext interface {
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
IsRecoverableError(err error) bool
IsNearlySynced() (bool, error)
}
type invRelayBlock struct {
Hash *externalapi.DomainHash
IsOrphanRoot bool
}
type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []invRelayBlock
invsQueue []*appmessage.MsgInvRelayBlock
}
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
@@ -61,7 +53,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]invRelayBlock, 0),
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
@@ -112,16 +104,10 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}
// Block relay is disabled if the node is already during IBD AND considered out of sync
// Block relay is disabled during IBD
if flow.IsIBDRunning() {
isNearlySynced, err := flow.IsNearlySynced()
if err != nil {
return err
}
if !isNearlySynced {
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
continue
}
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
}
log.Debugf("Requesting block %s", inv.Hash)
@@ -144,36 +130,8 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
// that means the process started by a proper and relevant relay block
if !inv.IsOrphanRoot {
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
if err != nil {
return err
}
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
if err != nil {
return err
}
// Since `BlueWork` respects topology, this condition means that the relay
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
continue
}
}
}
log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
missingParents, err := flow.processBlock(block)
missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
@@ -195,48 +153,13 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}
oldVirtualParents := hashset.New()
for _, parent := range oldVirtualInfo.ParentHashes {
oldVirtualParents.Add(parent)
}
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
log.Debugf("Relaying block %s", inv.Hash)
err = flow.relayBlock(block)
if err != nil {
return err
}
virtualHasNewParents := false
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}
virtualHasNewParents = true
block, found, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}
if !found {
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)
if err != nil {
return err
}
}
if virtualHasNewParents {
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
@@ -252,24 +175,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
return nil
}
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv invRelayBlock
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return invRelayBlock{}, err
return nil, err
}
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
@@ -314,7 +237,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock:
return message, nil
default:
@@ -323,25 +246,22 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
}
}
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
blockHash := consensushashing.BlockHash(block)
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
}
missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil
return missingParentsError.MissingParentHashes, nil, nil
}
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, nil
return nil, virtualChangeSet, nil
}
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@@ -449,16 +369,12 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}
if len(orphanRoots) == 0 {
// In some rare cases we get here when there are no orphan roots already
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]invRelayBlock, len(orphanRoots))
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
invMessages[i] = appmessage.NewMsgInvBlock(root)
}
flow.invsQueue = append(invMessages, flow.invsQueue...)

View File

@@ -10,9 +10,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99
const ibdBatchSize = router.DefaultMaxMessages
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
@@ -44,34 +42,7 @@ func (flow *handleRequestHeadersFlow) start() error {
if err != nil {
return err
}
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
consensus := flow.Domain().Consensus()
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
if err != nil {
return err
}
if !lowHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
}
highHashInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
}
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
if err != nil {
return err
}
if !isLowSelectedAncestorOfHigh {
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
lowHash, highHash)
}
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
@@ -80,7 +51,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}
@@ -88,7 +59,7 @@ func (flow *handleRequestHeadersFlow) start() error {
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := consensus.GetBlockHeader(blockHash)
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}

View File

@@ -1,12 +1,12 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -21,8 +21,8 @@ import (
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error
OnNewBlockTemplate() error
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
@@ -71,25 +71,43 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
}
isFinishedSuccessfully := false
var err error
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully, err)
flow.logIBDFinished(isFinishedSuccessfully)
}()
relayBlockHash := consensushashing.BlockHash(block)
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
log.Infof("Syncing blocks up to %s", relayBlockHash)
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
highHash := consensushashing.BlockHash(block)
log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Criticalf("Syncing blocks up to %s", highHash)
log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
checkpoint, err := externalapi.NewDomainHashFromString("05ff0f2e1d201dcaee7c5e567cc2c1d42ca3cce9fefbd3b519dc68b5bb89d0b9")
if err != nil {
return err
}
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
block, highestKnownSyncerChainHash)
info, err := flow.Domain().Consensus().GetBlockInfo(checkpoint)
if err != nil {
return err
}
if info.Exists {
isInSelectedParentChainOf, err := flow.Domain().Consensus().IsInSelectedParentChainOf(checkpoint, highestSharedBlockHash)
if err != nil {
return err
}
if !isInSelectedParentChainOf {
log.Criticalf("Stopped IBD because the checkpoint %s is not in the selected chain of %s", checkpoint, highestSharedBlockHash)
return nil
}
}
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {
return err
}
@@ -100,7 +118,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
if err != nil {
return err
}
@@ -113,166 +131,27 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", relayBlockHash)
"to the recent pruning point before normal operation can resume.", highHash)
return nil
}
}
err = flow.syncPruningPointFutureHeaders(
flow.Domain().Consensus(),
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
if err != nil {
return err
}
}
// We start by syncing missing bodies over the syncer selected chain
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
// The relay block might be in the anticone of the syncer selected tip, so
// check its chain for missing bodies as well.
// Note: this operation can be slightly optimized to avoid the full chain search, since the relay block
// is in the syncer's virtual mergeset, which has bounded size.
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
err = flow.syncMissingBlockBodies(relayBlockHash)
if err != nil {
return err
}
}
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
log.Debugf("Finished syncing blocks up to %s", highHash)
isFinishedSuccessfully = true
return nil
}
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
/*
Algorithm:
Request full selected chain block locator from syncer
Find the highest block which we know
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
*/
// Empty hashes indicate that the full chain is queried
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
syncerHeaderSelectedTipHash := locatorHashes[0]
var highestKnownSyncerChainHash *externalapi.DomainHash
chainNegotiationRestartCounter := 0
chainNegotiationZoomCounts := 0
initialLocatorLen := len(locatorHashes)
for {
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
for _, syncerChainHash := range locatorHashes {
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
if err != nil {
return nil, nil, err
}
if info.Exists {
if info.BlockStatus == externalapi.StatusInvalid {
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
}
currentHighestKnownSyncerChainHash = syncerChainHash
break
}
lowestUnknownSyncerChainHash = syncerChainHash
}
// No unknown blocks, break. Note that this can only happen in the first iteration
if lowestUnknownSyncerChainHash == nil {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// No shared block, break
if currentHighestKnownSyncerChainHash == nil {
highestKnownSyncerChainHash = nil
break
}
// No point in zooming further
if len(locatorHashes) == 1 {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// Zoom in
locatorHashes, err = flow.getSyncerChainBlockLocator(
lowestUnknownSyncerChainHash,
currentHighestKnownSyncerChainHash, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) > 0 {
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
"hashes to match the locator bounds")
}
chainNegotiationZoomCounts++
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
if len(locatorHashes) == 2 {
// We found our search target
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
if chainNegotiationZoomCounts > initialLocatorLen*2 {
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
// expected to decrease in size at least every two iterations
return nil, nil, protocolerrors.Errorf(true,
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
chainNegotiationZoomCounts, initialLocatorLen)
}
} else { // Empty locator signals a restart due to chain changes
chainNegotiationZoomCounts = 0
chainNegotiationRestartCounter++
if chainNegotiationRestartCounter > 32 {
return nil, nil, protocolerrors.Errorf(false,
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
}
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
// An empty locator signals that the syncer chain was modified and no longer contains one of
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
initialLocatorLen = len(locatorHashes)
// Reset syncer's header selected tip
syncerHeaderSelectedTipHash = locatorHashes[0]
}
}
log.Infof("Found highest known syncer chain block %s from peer %s",
highestKnownSyncerChainHash, flow.peer)
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}
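// What follows is a minimal, standalone sketch (not kaspad API) of the zoom-in
// negotiation implemented above, using integer chain indices in place of
// DomainHashes and a simple known-prefix predicate in place of consensus
// lookups. The exponential locator spacing is an illustrative assumption.
package main

import "fmt"

// locator samples the chain between the low and high indices (inclusive),
// highest first, with exponentially growing gaps.
func locator(low, high int) []int {
	var out []int
	step := 1
	for i := high; i > low; i -= step {
		out = append(out, i)
		step *= 2
	}
	return append(out, low)
}

func main() {
	const syncerTip = 999
	known := func(i int) bool { return i <= 613 } // the syncee's chain knowledge
	low, high := 0, syncerTip
	for {
		loc := locator(low, high)
		// Scan from high to low for the lowest unknown and highest known entries.
		lowestUnknown, highestKnown := -1, -1
		for _, i := range loc {
			if known(i) {
				highestKnown = i
				break
			}
			lowestUnknown = i
		}
		// Stop when the syncer tip is already known, or when the two candidates
		// are adjacent and the search target has been found.
		if lowestUnknown == -1 || len(loc) <= 2 {
			fmt.Println("highest known syncer chain block:", highestKnown) // 613
			return
		}
		low, high = highestKnown, lowestUnknown // zoom in on the gap
	}
}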
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
@@ -282,62 +161,146 @@ func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = fmt.Sprintf("(interrupted: %s)", err)
successString = "(interrupted)"
}
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
log.Infof("IBD finished %s", successString)
}
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
// findHighestSharedBlockHash attempts to find the highest block shared by the peer
// and this node. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
return nil, false, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
for {
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, false, err
}
if !highestHashFound {
return nil, false, nil
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, false, err
}
if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such a situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {
return highestHash, true, nil
}
locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, false, err
}
}
}
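// A toy restatement (assumed semantics, not kaspad API) of the stop conditions
// above, for a locator ordered from high (index 0) down to low: the search ends
// when the peer's highest known hash tops the locator, or when only two
// adjacent chain blocks remain, since zooming again would then return the same
// answer forever.
package main

import "fmt"

func shouldStopZooming(locatorLen, highestHashIndex int) bool {
	return highestHashIndex == 0 ||
		(locatorLen == 2 && highestHashIndex == 1)
}

func main() {
	fmt.Println(shouldStopZooming(10, 0)) // true: peer knows the locator's top entry
	fmt.Println(shouldStopZooming(2, 1))  // true: only two adjacent blocks left
	fmt.Println(shouldStopZooming(10, 4)) // false: keep zooming into the gap
}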
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
return nil, err
if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}
return blockLocator, nil
}
func (flow *handleIBDFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
return highestHashIndex, nil
}
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, false, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDChainBlockLocator:
if len(message.BlockLocatorHashes) > 64 {
return nil, protocolerrors.Errorf(true,
"Got block locator of size %d>64 while expecting locator to have size "+
"which is logarithmic in DAG size (which should never exceed 2^64)",
len(message.BlockLocatorHashes))
}
return message.BlockLocatorHashes, nil
case *appmessage.MsgIBDBlockLocatorHighestHash:
highestHash := message.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
return highestHash, true, nil
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
log.Debugf("Peer %s does not know any block within our blockLocator. "+
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
return nil, false, nil
default:
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
}
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScoreHint uint64) error {
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
log.Infof("Downloading headers from %s", flow.peer)
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
// No need to get syncer selected tip headers, so sync relay past and return
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
}
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
// Keep a short queue of BlockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
@@ -355,11 +318,6 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
close(blockHeadersMessageChan)
return
}
if len(blockHeadersMessage.BlockHeaders) == 0 {
// The syncer should have sent a done message if the search completed, and not an empty list
errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
return
}
blockHeadersMessageChan <- blockHeadersMessage
@@ -371,14 +329,24 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
}
})
count := 0
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
if !ok {
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"highHash block %s from peer %s during block download", highHash, flow.peer)
}
return nil
}
for _, header := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
_, err := flow.processHeader(consensus, header)
if err != nil {
return err
}
@@ -392,70 +360,11 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
}
}
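// A standalone sketch of the short-queue pattern used above: a small buffered
// channel lets one goroutine keep receiving header chunks while another
// validates and inserts them, so neither side sits idle. The chunk contents
// and queue depth are illustrative assumptions.
package main

import "fmt"

func main() {
	const queueDepth = 2
	chunks := make(chan []int, queueDepth)
	go func() {
		defer close(chunks) // closing stands in for the "done" message
		for i := 0; i < 5; i++ {
			chunks <- []int{2 * i, 2*i + 1} // stand-in for a received headers chunk
		}
	}()
	for chunk := range chunks {
		for _, header := range chunk {
			fmt.Println("validate and insert header", header) // overlaps with receiving
		}
	}
}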
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
// Finished downloading syncer selected tip blocks,
// check if we already have the triggering relayBlockHash
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
// Send a special header request for the selected tip anticone. This is expected to
// be a small set, as it is bounded by the size of the virtual's mergeset.
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
if err != nil {
return err
}
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
if err != nil {
return err
}
if anticoneDone {
return protocolerrors.Errorf(true,
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
relayBlockHash, syncerHeaderSelectedTipHash)
}
_, anticoneDone, err = flow.receiveHeaders()
if err != nil {
return err
}
if !anticoneDone {
return protocolerrors.Errorf(true,
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
relayBlockHash, syncerHeaderSelectedTipHash)
}
for _, header := range anticoneHeadersMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
peerSelectedTipHash *externalapi.DomainHash) error {
// If the relayBlockHash has still not been received, the peer is misbehaving
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
}
return nil
}
func (flow *handleIBDFlow) sendRequestAnticone(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
}
func (flow *handleIBDFlow) sendRequestHeaders(
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
@@ -478,7 +387,7 @@ func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeader
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
@@ -488,27 +397,26 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
return false, err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
return false, nil
}
err = consensus.ValidateAndInsertBlock(block, false)
_, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
return true, nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
@@ -572,7 +480,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
@@ -624,12 +532,6 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore()
// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
if err != nil {
return err
}
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
@@ -666,7 +568,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
return err
}
err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@@ -674,7 +576,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
@@ -685,15 +587,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
}
// We need to resolve virtual only if it wasn't updated while syncing block bodies
if !updateVirtual {
err := flow.resolveVirtual(highestProcessedDAAScore)
if err != nil {
return err
}
}
return flow.OnNewBlockTemplate()
return flow.resolveVirtual(highestProcessedDAAScore)
}
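// A sketch of the virtual-update policy above, with the consensus calls
// abstracted into callbacks (the names are placeholders): a nearly-synced node
// updates the virtual on every inserted block, while a deep IBD defers a
// single virtual resolution to the end.
package main

import "fmt"

func syncBodies(blocks []int, nearlySynced bool,
	insert func(block int, updateVirtual bool), resolveVirtual func()) {
	for _, block := range blocks {
		insert(block, nearlySynced)
	}
	if !nearlySynced {
		resolveVirtual() // resolve once, only if the virtual wasn't updated per block
	}
}

func main() {
	insert := func(block int, updateVirtual bool) {
		fmt.Printf("insert block %d (updateVirtual=%t)\n", block, updateVirtual)
	}
	resolve := func() { fmt.Println("resolve virtual") }
	syncBodies([]int{1, 2, 3}, false, insert, resolve)
}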
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
@@ -706,24 +600,38 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
var percents int
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
if percents < 0 {
percents = 0
} else if percents > 100 {
percents = 100
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
})
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
log.Infof("Resolved virtual")
return nil
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
if err != nil {
return err
}
err = flow.OnVirtualChange(virtualChangeSet)
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}
}
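// The percentage math above, extracted into a standalone sketch: the DAA-score
// target is only an estimate, so the result is clamped to [0, 100], and an
// explicit current < start guard avoids uint64 underflow.
package main

import "fmt"

func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100
	}
	if current < start {
		return 0 // guard against uint64 underflow in the subtraction below
	}
	percent := int(float64(current-start) / float64(target-start) * 100)
	if percent > 100 {
		percent = 100
	}
	return percent
}

func main() {
	fmt.Println(progressPercent(100, 550, 1000))  // 50
	fmt.Println(progressPercent(100, 1200, 1000)) // clamped to 100
}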

View File

@@ -10,10 +10,6 @@ type ibdProgressReporter struct {
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero or negative diff
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
@@ -27,16 +23,7 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid a negative diff
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)

View File

@@ -12,20 +12,18 @@ import (
"time"
)
func (flow *handleIBDFlow) ibdWithHeadersProof(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensusWithoutGenesis()
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensus()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
@@ -34,8 +32,6 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(
return err
}
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
@@ -49,34 +45,11 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(
return nil
}
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
relayBlock *externalapi.DomainBlock,
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
if highestKnownSyncerChainHash != nil {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
if err != nil {
return false, false, err
}
highestSharedBlockFound = blockInfo.HasBody()
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return false, false, err
}
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
pruningPoint, highestKnownSyncerChainHash)
if err != nil {
return false, false, err
}
}
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
// we might have information relevant to finality-conflict decisions. This should be taken into
// account when we improve this aspect.
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
if !highestSharedBlockFound {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
if err != nil {
return false, false, err
}
@@ -91,22 +64,22 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
return false, true, nil
}
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return false, err
}
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil {
return false, err
}
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil
}
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
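// A toy restatement of the condition above, with placeholder numbers: syncing
// to the relayed block is considered only when its blue score is at least a
// full pruning depth above our selected tip's, and it carries strictly more
// accumulated blue work.
package main

import (
	"fmt"
	"math/big"
)

func worthSyncing(relayBlueScore, tipBlueScore, pruningDepth uint64,
	relayBlueWork, tipBlueWork *big.Int) bool {
	if relayBlueScore < tipBlueScore+pruningDepth {
		return false
	}
	return relayBlueWork.Cmp(tipBlueWork) > 0
}

func main() {
	fmt.Println(worthSyncing(200_000, 10_000, 185_000,
		big.NewInt(900), big.NewInt(500))) // true
	fmt.Println(worthSyncing(50_000, 10_000, 185_000,
		big.NewInt(900), big.NewInt(500))) // false: still within pruning depth
}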
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
@@ -141,10 +114,7 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScore uint64) error {
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
if err != nil {
return err
@@ -161,20 +131,19 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
if err != nil {
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
if !highHashInfo.Exists {
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
}
@@ -237,8 +206,7 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
return err
}
i := 0
for ; ; i++ {
for {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
@@ -252,19 +220,9 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
if err != nil {
return err
}
// We use i+2 rather than i+1 because we want to check whether the next block will belong to the
// next batch, and the pruning point itself was already downloaded before the loop.
if (i+2)%ibdBatchSize == 0 {
log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
if err != nil {
return err
}
}
}
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
return nil
}
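// A sketch of the request cadence in the removed loop above: the pruning point
// itself was consumed before the loop, so anticone block i is the (i+2)-th
// trusted-data message overall, and a fresh batch is requested exactly when
// that count fills a batch. The batch size here is a placeholder.
package main

import "fmt"

func main() {
	const batchSize = 4 // stand-in for ibdBatchSize
	for i := 0; i < 10; i++ {
		if (i+2)%batchSize == 0 {
			fmt.Printf("after anticone block %d: request next batch\n", i) // i = 2, 6
		}
	}
}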
@@ -285,14 +243,8 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
}
return err
}
return nil
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
return err
}
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
@@ -392,7 +344,6 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
return false, err
}

View File

@@ -2,8 +2,6 @@ package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -63,9 +61,6 @@ func (flow *sendPingsFlow) start() error {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
if errors.Is(err, router.ErrTimeout) {
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
}
return err
}
pongMessage := message.(*appmessage.MsgPong)

View File

@@ -1,14 +1,14 @@
package v5
package v4
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -78,7 +78,6 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
appmessage.CmdPruningPointProof,
appmessage.CmdTrustedData,
appmessage.CmdIBDChainBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBD(m.Context(), incomingRoute,
@@ -122,7 +121,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
),
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
@@ -135,20 +134,6 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
},
),
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestAnticone", router,
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandlePruningPointProofRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {

View File

@@ -1,7 +1,7 @@
package testing
import (
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"testing"
"time"

View File

@@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsNearlySynced() (bool, error)
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
@@ -50,12 +50,7 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err
}
isNearlySynced, err := flow.IsNearlySynced()
if err != nil {
return err
}
// Transaction relay is disabled if the node is out of sync and thus not mining
if !isNearlySynced {
if flow.IsIBDRunning() {
continue
}
@@ -102,7 +97,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
return true
}

View File

@@ -3,7 +3,7 @@ package transactionrelay_test
import (
"errors"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"strings"
"testing"
@@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
return true, nil
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't

View File

@@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
}
for _, transactionID := range msgRequestTransactions.IDs {
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
@@ -40,6 +40,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
}
continue
}
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
if err != nil {
return err

View File

@@ -2,7 +2,7 @@ package transactionrelay_test
import (
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"testing"
"github.com/kaspanet/kaspad/app/appmessage"

View File

@@ -1,16 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be equal at both the syncer and the syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages`, to avoid confusion and human errors.
// Nonetheless, we must enforce that it does not exceed `router.DefaultMaxMessages`
if ibdBatchSize >= router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}

View File

@@ -1,85 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestIBDChainBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
Domain() domain.Domain
}
type handleRequestIBDChainBlockLocatorFlow struct {
RequestIBDChainBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDChainBlockLocator handles RequestIBDChainBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestIBDChainBlockLocatorFlow{
RequestIBDChainBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
for {
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
var locator externalapi.BlockLocator
if highHash == nil || lowHash == nil {
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
} else {
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
// The chain has been modified, signal it by sending an empty locator
locator, err = externalapi.BlockLocator{}, nil
}
}
if err != nil {
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between %s and %s", lowHash, highHash)
}
err = flow.sendIBDChainBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}
func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
if err != nil {
return err
}
return nil
}

View File

@@ -1,95 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sort"
)
// RequestAnticoneContext is the interface for the context needed for the HandleRequestAnticone flow.
type RequestAnticoneContext interface {
Domain() domain.Domain
Config() *config.Config
}
type handleRequestAnticoneFlow struct {
RequestAnticoneContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestAnticoneFlow{
RequestAnticoneContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestAnticoneFlow) start() error {
for {
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected with the past of the relayed block, and is thus expected to be bounded by the mergeset limit, since
// we relay blocks only if they enter the virtual's mergeset. We add a factor of 2 for possible sync gaps.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone")
}
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
// We sort the headers in bottom-up topological order before sending
sort.Slice(blockHeaders, func(i, j int) bool {
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
})
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
contextHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}
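// A standalone sketch of the ordering used above, assuming blue work increases
// monotonically along any DAG path: sorting headers by ascending blue work
// therefore yields a bottom-up topological order, with parents before children.
package main

import (
	"fmt"
	"math/big"
	"sort"
)

type header struct {
	name     string
	blueWork *big.Int
}

func main() {
	headers := []header{
		{"child", big.NewInt(30)},
		{"grandchild", big.NewInt(45)},
		{"parent", big.NewInt(12)},
	}
	sort.Slice(headers, func(i, j int) bool {
		return headers[i].blueWork.Cmp(headers[j].blueWork) < 0
	})
	fmt.Println(headers) // [{parent 12} {child 30} {grandchild 45}]
}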

View File

@@ -2,11 +2,10 @@ package protocol
import (
"fmt"
"github.com/kaspanet/kaspad/app/protocol/common"
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain"
@@ -91,9 +90,14 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
return <-errChan
}
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
// SetOnVirtualChange sets the onVirtualChange handler
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
}
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
}
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
@@ -106,6 +110,12 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
}
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (m *Manager) ShouldMine() (bool, error) {
return m.context.ShouldMine()
}
// IsIBDRunning returns true if IBD is currently marked as running
func (m *Manager) IsIBDRunning() bool {
return m.context.IsIBDRunning()

View File

@@ -3,7 +3,7 @@ package protocol
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
"github.com/kaspanet/kaspad/app/protocol/flows/v5"
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
"sync"
"sync/atomic"
@@ -23,7 +23,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
// They are both initialized here and passed to register flows.
isStopping := uint32(0)
errChan := make(chan error, 1)
errChan := make(chan error)
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
@@ -76,8 +76,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
var flows []*common.Flow
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
switch peer.ProtocolVersion() {
case 5:
flows = v5.Register(m, router, errChan, &isStopping)
case 4:
flows = v4.Register(m, router, errChan, &isStopping)
default:
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/pkg/errors"
)
// Manager is an RPC manager
@@ -29,7 +28,6 @@ func NewManager(
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex,
consensusEventsChan chan externalapi.ConsensusEvent,
shutDownChan chan<- struct{}) *Manager {
manager := Manager{
@@ -46,90 +44,50 @@ func NewManager(
}
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
manager.initConsensusEventsHandler(consensusEventsChan)
return &manager
}
func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) {
spawn("consensusEventsHandler", func() {
for {
consensusEvent, ok := <-consensusEventsChan
if !ok {
return
}
switch event := consensusEvent.(type) {
case *externalapi.VirtualChangeSet:
err := m.notifyVirtualChange(event)
if err != nil {
panic(err)
}
case *externalapi.BlockAdded:
err := m.notifyBlockAddedToDAG(event.Block)
if err != nil {
panic(err)
}
default:
panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
}
}
})
}
// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
defer onEnd()
// Before converting the block and populating it, we check if any listeners are interested.
// This is done since most nodes do not use this event.
if !m.context.NotificationManager.HasBlockAddedListeners() {
return nil
err := m.NotifyVirtualChange(virtualChangeSet)
if err != nil {
return err
}
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true)
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
if err != nil {
return err
}
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
if err != nil {
return err
}
return nil
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}
// notifyVirtualChange notifies the manager that the virtual block has been changed.
func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
defer onEnd()
if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil {
if m.context.Config.UTXOIndex {
err := m.notifyUTXOsChanged(virtualChangeSet)
if err != nil {
return err
}
}
err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore)
err := m.notifyVirtualSelectedParentBlueScoreChanged()
if err != nil {
return err
}
err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore)
err = m.notifyVirtualDaaScoreChanged()
if err != nil {
return err
}
if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
(len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
return nil
}
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
if err != nil {
return err
@@ -138,13 +96,6 @@ func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
return nil
}
// NotifyNewBlockTemplate notifies the manager that a new
// block template is available for miners
func (m *Manager) NotifyNewBlockTemplate() error {
notification := appmessage.NewNewBlockTemplateNotificationMessage()
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
}
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
// resets due to pruning point change via IBD.
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
@@ -187,7 +138,6 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
if err != nil {
return err
}
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
}
@@ -203,18 +153,33 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
}
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error {
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
defer onEnd()
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore)
virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return err
}
blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
if err != nil {
return err
}
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
}
func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error {
func (m *Manager) notifyVirtualDaaScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
defer onEnd()
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
}
@@ -223,16 +188,10 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
defer onEnd()
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
if hasListeners {
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
if err != nil {
return err
}
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges)
if err != nil {
return err
}
return nil
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
}

View File

@@ -48,9 +48,6 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply,
appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses,
}
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {

View File

@@ -3,14 +3,12 @@ package rpccontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) (
*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
for i, removed := range selectedParentChainChanges.Removed {
@@ -22,58 +20,5 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
addedChainBlocks[i] = added.String()
}
var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs
if includeAcceptedTransactionIDs {
var err error
acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
if err != nil {
return nil, err
}
}
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
}
func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
[]*appmessage.AcceptedTransactionIDs, error) {
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
const chunk = 1000
position := 0
for position < len(selectedParentChainChanges.Added) {
var chainBlocksChunk []*externalapi.DomainHash
if position+chunk > len(selectedParentChainChanges.Added) {
chainBlocksChunk = selectedParentChainChanges.Added[position:]
} else {
chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
}
// We use chunks in order to avoid blocking consensus for too long
chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
if err != nil {
return nil, err
}
for i, addedChainBlock := range chainBlocksChunk {
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
AcceptingBlockHash: addedChainBlock.String(),
AcceptedTransactionIDs: nil,
}
for _, blockAcceptanceData := range chainBlockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
}
}
}
}
position += chunk
}
return acceptedTransactionIDs, nil
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
}
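
The removed helper above uses chunked iteration: it walks a large slice in fixed-size windows so that the consensus lock is held only briefly per window. A minimal sketch of the same pattern, with illustrative names:

```go
package main

import "fmt"

// processInChunks visits items in windows of chunkSize so that each
// call to process holds any shared resource only briefly.
func processInChunks(items []int, chunkSize int, process func([]int)) {
	for position := 0; position < len(items); position += chunkSize {
		end := position + chunkSize
		if end > len(items) {
			end = len(items)
		}
		process(items[position:end])
	}
}

func main() {
	items := make([]int, 10)
	processInChunks(items, 3, func(chunk []int) {
		fmt.Println("processing chunk of", len(chunk))
	})
	// prints three chunks of 3 and one final chunk of 1
}
```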

View File

@@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
UTXOIndex: utxoIndex,
ShutDownChan: shutDownChan,
}
context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams)
context.NotificationManager = NewNotificationManager()
return context
}

View File

@@ -3,11 +3,6 @@ package rpccontext
import (
"sync"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/utxoindex"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -18,7 +13,6 @@ import (
type NotificationManager struct {
sync.RWMutex
listeners map[*routerpkg.Router]*NotificationListener
params *dagconfig.Params
}
// UTXOsChangedNotificationAddress represents a kaspad address.
@@ -30,8 +24,6 @@ type UTXOsChangedNotificationAddress struct {
// NotificationListener represents a registered RPC notification listener
type NotificationListener struct {
params *dagconfig.Params
propagateBlockAddedNotifications bool
propagateVirtualSelectedParentChainChangedNotifications bool
propagateFinalityConflictNotifications bool
@@ -40,16 +32,13 @@ type NotificationListener struct {
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
propagateVirtualDaaScoreChangedNotifications bool
propagatePruningPointUTXOSetOverrideNotifications bool
propagateNewBlockTemplateNotifications bool
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
}
// NewNotificationManager creates a new NotificationManager
func NewNotificationManager(params *dagconfig.Params) *NotificationManager {
func NewNotificationManager() *NotificationManager {
return &NotificationManager{
params: params,
listeners: make(map[*routerpkg.Router]*NotificationListener),
}
}
@@ -59,7 +48,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
nm.Lock()
defer nm.Unlock()
listener := newNotificationListener(nm.params)
listener := newNotificationListener()
nm.listeners[router] = listener
}
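
AddListener takes the write lock because it mutates the listener map, while the Notify* methods below take only the read lock, so notifications can fan out concurrently while registration stays serialized. A minimal sketch of this RWMutex-guarded registry pattern (illustrative types, not kaspad's):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	sync.RWMutex
	listeners map[int]func(string)
}

func (r *registry) add(id int, fn func(string)) {
	r.Lock() // writer: exclusive access while the map is modified
	defer r.Unlock()
	r.listeners[id] = fn
}

func (r *registry) notify(msg string) {
	r.RLock() // readers: many notify calls may run concurrently
	defer r.RUnlock()
	for _, fn := range r.listeners {
		fn(msg)
	}
}

func main() {
	r := &registry{listeners: make(map[int]func(string))}
	r.add(1, func(m string) { fmt.Println("listener got:", m) })
	r.notify("block added")
}
```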
@@ -83,19 +72,6 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
return listener, nil
}
// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
func (nm *NotificationManager) HasBlockAddedListeners() bool {
nm.RLock()
defer nm.RUnlock()
for _, listener := range nm.listeners {
if listener.propagateBlockAddedNotifications {
return true
}
}
return false
}
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
nm.RLock()
@@ -103,8 +79,10 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
for router, listener := range nm.listeners {
if listener.propagateBlockAddedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification)
if err != nil {
err := router.OutgoingRoute().Enqueue(notification)
if errors.Is(err, routerpkg.ErrRouteClosed) {
log.Warnf("Couldn't send notification: %s", err)
} else if err != nil {
return err
}
}
@@ -113,27 +91,13 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
}
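
The restored enqueue path treats a closed outgoing route as benign: errors.Is filters routerpkg.ErrRouteClosed into a warning, while any other failure still propagates. A minimal sketch of that guard, with a stand-in error value:

```go
package main

import (
	"errors"
	"fmt"
)

// errRouteClosed stands in for routerpkg.ErrRouteClosed.
var errRouteClosed = errors.New("route closed")

// notify swallows a closed-route error with a warning and returns
// every other error to the caller.
func notify(enqueue func() error) error {
	err := enqueue()
	if errors.Is(err, errRouteClosed) {
		fmt.Println("couldn't send notification:", err)
		return nil // a disconnected listener is not fatal
	} else if err != nil {
		return err
	}
	return nil
}

func main() {
	_ = notify(func() error { return errRouteClosed }) // warned, not fatal
	_ = notify(func() error { return nil })            // delivered
}
```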
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
nm.RLock()
defer nm.RUnlock()
notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
AddedChainBlockHashes: notification.AddedChainBlockHashes,
}
for router, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications {
var err error
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
err = router.OutgoingRoute().MaybeEnqueue(notification)
} else {
err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
}
err := router.OutgoingRoute().Enqueue(notification)
if err != nil {
return err
}
@@ -142,31 +106,6 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
return nil
}
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
// to include AcceptedTransactionIDs.
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
nm.RLock()
defer nm.RUnlock()
hasListeners = false
hasListenersThatRequireAcceptedTransactionIDs = false
for _, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications {
hasListeners = true
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
hasListenersThatRequireAcceptedTransactionIDs = true
break
}
}
}
return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
nm.RLock()
@@ -207,10 +146,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
for router, listener := range nm.listeners {
if listener.propagateUTXOsChangedNotifications {
// Filter utxoChanges and create a notification
notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
if err != nil {
return err
}
notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
// Don't send the notification if it's empty
if len(notification.Added) == 0 && len(notification.Removed) == 0 {
@@ -218,7 +154,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
}
// Enqueue the notification
err = router.OutgoingRoute().MaybeEnqueue(notification)
err := router.OutgoingRoute().Enqueue(notification)
if err != nil {
return err
}
@@ -237,7 +173,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
for router, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification)
err := router.OutgoingRoute().Enqueue(notification)
if err != nil {
return err
}
@@ -256,25 +192,6 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
for router, listener := range nm.listeners {
if listener.propagateVirtualDaaScoreChangedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification)
if err != nil {
return err
}
}
}
return nil
}
// NotifyNewBlockTemplate notifies the notification manager that a new
// block template is available for miners
func (nm *NotificationManager) NotifyNewBlockTemplate(
notification *appmessage.NewBlockTemplateNotificationMessage) error {
nm.RLock()
defer nm.RUnlock()
for router, listener := range nm.listeners {
if listener.propagateNewBlockTemplateNotifications {
err := router.OutgoingRoute().Enqueue(notification)
if err != nil {
return err
@@ -301,27 +218,18 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
return nil
}
func newNotificationListener(params *dagconfig.Params) *NotificationListener {
func newNotificationListener() *NotificationListener {
return &NotificationListener{
params: params,
propagateBlockAddedNotifications: false,
propagateVirtualSelectedParentChainChangedNotifications: false,
propagateFinalityConflictNotifications: false,
propagateFinalityConflictResolvedNotifications: false,
propagateUTXOsChangedNotifications: false,
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
propagateNewBlockTemplateNotifications: false,
propagatePruningPointUTXOSetOverrideNotifications: false,
}
}
// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
// includes accepted transaction IDs in it's virtual-selected-parent-chain-changed notifications
func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
}
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
// to the remote listener
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
@@ -330,9 +238,8 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
// to the remote listener
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) {
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
nl.propagateVirtualSelectedParentChainChangedNotifications = true
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
}
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
@@ -351,11 +258,7 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses =
@@ -370,11 +273,7 @@ func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *Notificati
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
return
}
@@ -385,7 +284,7 @@ func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *Noti
}
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) {
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
// As an optimization, we iterate over the smaller set (O(n)) among the two below
// and check existence over the larger set (O(1))
@@ -400,64 +299,27 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
}
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
} else if addressesSize > 0 {
} else {
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
} else {
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
if err != nil {
return nil, err
}
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
if err != nil {
return nil, err
}
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPairs)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
return notification, nil
}
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
// a script of unknown (non-standard) type is not an error here; it yields an empty address string below
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
if err != nil {
return "", err
}
var addressString string
if scriptType == txscript.NonStandardTy {
addressString = ""
} else {
addressString = address.String()
}
return addressString, nil
return notification
}
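
The converter above chooses its iteration strategy by size: walk the smaller collection linearly and probe the larger one by key, keeping the cost at O(min(n, m)). A self-contained sketch of that optimization on plain maps:

```go
package main

import "fmt"

// intersectKeys iterates the smaller map (O(n)) and checks membership
// in the larger one (O(1) per probe).
func intersectKeys(a, b map[string]struct{}) []string {
	small, large := a, b
	if len(b) < len(a) {
		small, large = b, a
	}
	var common []string
	for k := range small {
		if _, ok := large[k]; ok {
			common = append(common, k)
		}
	}
	return common
}

func main() {
	a := map[string]struct{}{"x": {}, "y": {}}
	b := map[string]struct{}{"y": {}, "z": {}}
	fmt.Println(intersectKeys(a, b)) // [y]
}
```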
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
@@ -472,12 +334,6 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
nl.propagateVirtualDaaScoreChangedNotifications = true
}
// PropagateNewBlockTemplateNotifications instructs the listener to send
// new block template notifications to the remote listener
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
nl.propagateNewBlockTemplateNotifications = true
}
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
// to the remote listener.
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {

View File

@@ -32,6 +32,22 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
return utxosByAddressesEntries
}
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
for outpoint := range outpoints {
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
Address: address,
Outpoint: &appmessage.RPCOutpoint{
TransactionID: outpoint.TransactionID.String(),
Index: outpoint.Index,
},
})
}
return utxosByAddressesEntries
}
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
// to UTXOsChangedNotificationAddresses
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
@@ -47,7 +63,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
if err != nil {
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
}
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
addresses[i] = &UTXOsChangedNotificationAddress{
Address: addressString,
ScriptPublicKeyString: scriptPublicKeyString,

View File

@@ -122,7 +122,6 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
}
ctx.Domain.Consensus().PopulateMass(domainTransaction)
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
Hash: consensushashing.TransactionHash(domainTransaction).String(),

View File

@@ -9,14 +9,6 @@ import (
// HandleAddPeer handles the respectively named RPC command
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewAddPeerResponseMessage()
response.Error =
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
return response, nil
}
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
if err != nil {

View File

@@ -9,14 +9,6 @@ import (
// HandleBan handles the respectively named RPC command
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewBanResponseMessage()
response.Error =
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
return response, nil
}
banRequest := request.(*appmessage.BanRequestMessage)
ip := net.ParseIP(banRequest.IP)
if ip == nil {

View File

@@ -27,27 +27,6 @@ func HandleEstimateNetworkHashesPerSecond(
}
}
if context.Config.SafeRPC {
const windowSizeLimit = 10000
if windowSize > windowSizeLimit {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
windowSize, windowSizeLimit)
return response, nil
}
}
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than pruning point depth %d",
windowSize, context.Config.ActiveNetParams.PruningDepth())
return response, nil
}
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
if err != nil {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}

View File

@@ -22,7 +22,7 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
if err != nil {
rpcError := &appmessage.RPCError{}
if !errors.As(err, &rpcError) {
if !errors.As(err, rpcError) {
return nil, err
}
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
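
A note on errors.As: its target must be a non-nil pointer to a type implementing error, so when the concrete error type is itself a pointer (as with *appmessage.RPCError), the idiomatic call passes a pointer to that pointer variable. A minimal sketch under that assumption:

```go
package main

import (
	"errors"
	"fmt"
)

// rpcError mimics a pointer-receiver error type such as *appmessage.RPCError.
type rpcError struct{ message string }

func (e *rpcError) Error() string { return e.message }

func main() {
	err := fmt.Errorf("wrapped: %w", &rpcError{message: "balance lookup failed"})

	var target *rpcError
	if errors.As(err, &target) { // pointer to the *rpcError variable
		fmt.Println("rpc error:", target.message)
	}
}
```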

View File

@@ -23,7 +23,7 @@ func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router,
if err != nil {
rpcError := &appmessage.RPCError{}
if !errors.As(err, &rpcError) {
if !errors.As(err, rpcError) {
return nil, err
}
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}

View File

@@ -4,11 +4,9 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/version"
)
// HandleGetBlockTemplate handles the respectively named RPC command
@@ -17,7 +15,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
if err != nil {
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
errorMessage := &appmessage.GetBlockResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
return errorMessage, nil
}
@@ -27,20 +25,18 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
return nil, err
}
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}
templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
if err != nil {
return nil, err
}
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}
if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
return errorMessage, nil
}
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
}

View File

@@ -37,7 +37,7 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
return nil, err
}
if !blockInfo.HasHeader() {
if !blockInfo.Exists {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
}, nil

View File

@@ -23,10 +23,6 @@ type fakeDomain struct {
testapi.TestConsensus
}
func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent {
panic("implement me")
}
func (d fakeDomain) DeleteStagingConsensus() error {
panic("implement me")
}
@@ -35,7 +31,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
panic("implement me")
}
func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
func (d fakeDomain) InitStagingConsensus() error {
panic("implement me")
}

View File

@@ -1,29 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetCoinSupply handles the respectively named RPC command
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if !context.Config.UTXOIndex {
errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
return errorMessage, nil
}
circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
if err != nil {
return nil, err
}
response := appmessage.NewGetCoinSupplyResponseMessage(
constants.MaxSompi,
circulatingSompiSupply,
)
return response, nil
}

View File

@@ -9,17 +9,10 @@ import (
// HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
isNearlySynced, err := context.Domain.Consensus().IsNearlySynced()
if err != nil {
return nil, err
}
response := appmessage.NewGetInfoResponseMessage(
context.NetAdapter.ID().String(),
uint64(context.Domain.MiningManager().TransactionCount(true, false)),
uint64(context.Domain.MiningManager().TransactionCount()),
version.Version(),
context.Config.UTXOIndex,
context.ProtocolManager.Context().HasPeers() && isNearlySynced,
)
return response, nil

View File

@@ -7,40 +7,19 @@ import (
)
// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage)
entries := make([]*appmessage.MempoolEntry, 0)
transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)
if !getMempoolEntriesRequest.FilterTransactionPool {
for _, transaction := range transactionPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
})
}
}
if getMempoolEntriesRequest.IncludeOrphanPool {
for _, transaction := range orphanPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
})
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
transactions := context.Domain.MiningManager().AllTransactions()
entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
for _, transaction := range transactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
})
}
return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil

View File

@@ -1,122 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
)
// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)
mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)
sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
if err != nil {
return nil, err
}
for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {
address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
if err != nil {
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
return errorMessage, nil
}
sending := make([]*appmessage.MempoolEntry, 0)
receiving := make([]*appmessage.MempoolEntry, 0)
scriptPublicKey, err := txscript.PayToAddrScript(address)
if err != nil {
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
return errorMessage, nil
}
if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {
if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
}
if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {
if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
}
if len(sending) > 0 || len(receiving) > 0 {
mempoolEntriesByAddresses = append(
mempoolEntriesByAddresses,
&appmessage.MempoolEntryByAddress{
Address: address.String(),
Sending: sending,
Receiving: receiving,
},
)
}
}
return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil
}

View File

@@ -3,18 +3,12 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
transaction := &externalapi.DomainTransaction{}
var found bool
var isOrphan bool
getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)
transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)
@@ -24,18 +18,17 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
return errorMessage, nil
}
mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool)
if !found {
transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
if !ok {
errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
return errorMessage, nil
}
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
}

View File

@@ -26,14 +26,12 @@ func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _
return response, nil
}
chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs)
chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(virtualSelectedParentChain)
if err != nil {
return nil, err
}
response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes,
chainChangedNotification.AcceptedTransactionIDs)
chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes)
return response, nil
}

View File

@@ -1,19 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
listener, err := context.NotificationManager.Listener(router)
if err != nil {
return nil, err
}
listener.PropagateNewBlockTemplateNotifications()
response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
return response, nil
}

View File

@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
if err != nil {
return nil, err
}
context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)
listener.PropagateUTXOsChangedNotifications(addresses)
response := appmessage.NewNotifyUTXOsChangedResponseMessage()
return response, nil

View File

@@ -7,17 +7,12 @@ import (
)
// HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command
func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router,
request appmessage.Message) (appmessage.Message, error) {
notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage)
func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
listener, err := context.NotificationManager.Listener(router)
if err != nil {
return nil, err
}
listener.PropagateVirtualSelectedParentChainChangedNotifications(
notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs)
listener.PropagateVirtualSelectedParentChainChangedNotifications()
response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage()
return response, nil

View File

@@ -8,14 +8,6 @@ import (
// HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error =
appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
return response, nil
}
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil

View File

@@ -12,14 +12,6 @@ const pauseBeforeShutDown = time.Second
// HandleShutDown handles the respectively named RPC command
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewShutDownResponseMessage()
response.Error =
appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
return response, nil
}
log.Warn("ShutDown RPC called.")
// Wait a second before shutting down, to allow time to return the response to the caller

View File

@@ -26,7 +26,7 @@ func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router
if err != nil {
return nil, err
}
context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses)
listener.StopPropagatingUTXOsChangedNotifications(addresses)
response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
return response, nil

View File

@@ -1,7 +1,6 @@
package rpchandlers
import (
"encoding/json"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
@@ -15,14 +14,9 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)
var err error
isSynced := false
// The node is considered synced if it has peers and consensus state is nearly synced
if context.ProtocolManager.Context().HasPeers() {
isSynced, err = context.ProtocolManager.Context().IsNearlySynced()
if err != nil {
return nil, err
}
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}
if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
@@ -64,12 +58,6 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
return nil, err
}
jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ")
if jsonBytes != nil {
log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+
"the full header for debug purposes: \n%s", err, string(jsonBytes))
}
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block rejected. Reason: %s", err),
RejectReason: appmessage.RejectReasonBlockInvalid,

View File

@@ -9,14 +9,6 @@ import (
// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewUnbanResponseMessage()
response.Error =
appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
return response, nil
}
unbanRequest := request.(*appmessage.UnbanRequestMessage)
ip := net.ParseIP(unbanRequest.IP)
if ip == nil {

View File

@@ -5,8 +5,9 @@ FLAGS=$@
go version
go get $FLAGS -t -d ./...
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint
go install $FLAGS honnef.co/go/tools/cmd/staticcheck@latest
# This is to bypass a go bug: https://github.com/golang/go/issues/27643
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint \
honnef.co/go/tools/cmd/staticcheck
test -z "$(go fmt ./...)"

View File

@@ -1,197 +1,3 @@
Kaspad v0.12.10 - 2022-11-23
===========================
* Increase devnet's initial difficulty (#2167)
Bug fixes:
* Check rule errors when validating blocks with trusted data (#2171)
* Compare blue score with selected tip when checking if a pruning point proof is needed (#2169)
* Add found to GetBlock (#2165)
Wallet new features:
* Use one of the From addresses as a change address (#2164)
Kaspad v0.12.9 - 2022-10-23
===========================
* Create directory before locking lock file (#2160)
Kaspad v0.12.8 - 2022-10-23
===========================
* Remove hard fork activation rules (#2152)
* Add lock file to kaspawallet (#2154)
* Add a new testnet DNS seeder (#2156)
* Use utxo diff algo for pruning point move and use acceptance data method only as a fall-back (#2157)
* Make more checks if status is invalid even if the block exists (#2158)
Kaspad v0.12.7 - 2022-09-21
===========================
* Security Fix + Hard fork - Full details can be seen here: https://medium.com/@michaelsuttonil/kaspa-security-patch-and-hard-fork-september-2022-12da617b0094
Kaspad v0.12.6 - 2022-09-09
===========================
* Remove tests from docker files (#2133)
Wallet new features:
* Optionally show serialized transactions on send (#2135)
Bug fixes:
* Update virtual on IBD if nearly synced (#2134)
Kaspad v0.12.5 - 2022-08-28
===========================
* Add tests for hash writers (#2120)
* Replace daglabs's dnsseeder with Wolfie's (#2119)
* Change testnet dnsseeder (#2126)
* Add RPC timeout parameter to wallet daemon (#2104)
Wallet new features:
* Add UseExistingChangeAddress option to the wallet (#2127)
Bug fixes:
* Call update pruning point if required on resolve virtual or startup (#2129)
* Add missing locks to notification listener modifications (#2124)
* Calculate pruning point utxo set from acceptance data (#2123)
* Fix RPC client memory/goroutine leak (#2122)
* Fix a subtle lock sync issue in consensus insert block (#2121)
* Mempool: Retrieve stable state of the mempool. Optimize get mempool entries by addresses (#2111)
* Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout (#2131)
Kaspad v0.12.4 - 2022-07-17
===========================
* Crucial fix for the UTXO difference mechanism (#2114)
* Implement multi-layer auto-compound (#2115)
Kaspad v0.12.3 - 2022-06-29
===========================
* Fixes a few bugs which can lead to node crashes or out-of-memory errors
Kaspad v0.12.2 - 2022-06-17
===========================
* Clarify wallet message concerning a wallet daemon sync state (#2045)
* Change the way the miner executable reports execution errors (closes issue #1677) (#2048)
* Fix kaspawallet help messages, clarify sweep command help string (#2067)
* Wallet parse/send/create commands improvement (#2024)
* Use chunks for `GetBlocksAcceptanceData` calls in order to avoid blocking consensus for too long (#2075)
* Unite multiple `GetBlockAcceptanceData` consensus calls to one (#2074)
* Update many-small-chains-and-one-big-chain DAG to not fail merge depth limit (#2072)
RPC API Changes:
* RPC: include orphans into mempool entries (#2046)
* RPC & UtxoIndex: keep track of, query and test circulating supply. (#2070)
Bug Fixes:
* Fix RPC connections counting (#2026)
* Fix UTXO diff child error (#2084)
* Fix `not in selected chain` crash (#2082)
Kaspad v0.12.1 - 2022-05-31
===========================
* Fix utxoindex synchronization bug which resulted in kaspawallet orphan tx errors (#2052, #2056, #2059)
* Add a channel mechanism for consensus events to be processed in the order they were produced (#2052, #2056, #2059)
* Block template cache improvement (#2023)
* Improved staging shard performance (#2034)
* Add finality check to ResolveVirtual (#2041)
* Update Dockerfile for go 1.18 (#2038)
* Remove HF1 activation code (#2042)
Kaspa wallet:
* Various kaspawallet text fixes and log additions (#2032, #2047, #2062)
* Wallet address synchronization improvement (#2025)
* Add support for `from` address in `kaspawallet send` (#1964)
* Make kaspawallet ignore outputs that exist in the mempool (#2053)
* Wrap the entire wallet send operation with a lock (#2063)
RPC API:
* Add "GetMempoolEntriesByAddresses" to kaspad RPC (#2022)
* Make sure RPCErrors are returned and do not crash the system (#2039)
* Add AcceptedTransactionIDs to ChainChanged notification and VirtualSelectedParentChain RPC (#2036, for exchanges to track tx confirmations)
* Allow blank address in NotifyUTXOsChanged to get all updates (#2027)
* Include isSynced and isUtxoIndexed in GetInfoResponse (#2068)
Kaspad v0.12.0 - 2022-04-14
===========================
Breaking changes:
Hard-fork at DAA score 14687583 (estimated to be on 28/04 16:38 UTC) which includes:
* Using separate depth than finality depth for merge set calculations (#2013)
* Not counting the header size as part of the block mass (#2013)
* Increasing block version to 1 (#2013)
* Removing the limit on amount of KAS that can be sent in one transaction (#2013)
Bug fixes:
* Making a workaround for the UTXO diff child bug (#2020)
* Use cosigner index 0 for read only wallets (#2014)
Non-breaking changes:
* Adding a "sweep" command to `kaspawallet` (#2018)
* Use `blue work` heuristic to skip irrelevant relay blocks
* Kaspawallet daemon: Add Send and Sign commands (#2016)
Kaspad v0.11.17 - 2022-04-06
===========================
* Decrement estimatedHeaderUpperBound from mempool's MaxBlockMass (#2009)
Kaspad v0.11.16 - 2022-04-05
===========================
* Don't skip wallet address with different cosigner index (#2007)
Kaspad v0.11.15 - 2022-04-05
===========================
* Add support for auto-compound in `kaspawallet send` (#1951)
* Unite reachability stores (#1963, #1993, #2001)
* Add names to nameless routes (#1986)
* Optimize the miner-kaspad flow and latency (#1988)
* Upgrade to go 1.18 (#1992)
* Add package name to kaspawalletd .proto file (#1991)
* Block template cache (#1994)
* Add extra data to GetBlockTemplate request (#1995, #1997)
* New definition for "out of sync" (#1996)
* Remove v4 p2p version (#1998)
* Remove increase pagefile from deploy.yaml (#2000)
* Cache the pruning point anticone (#2002)
* Add DB compaction after the deletion of a DB prefix (#2003)
* Fixed a bug in staging of pruning point by index (#2005)
* Clean up debug log level by moving many frequent logs to trace level (#2004)
Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)
Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Cleanup log output mistakes and try to be more clear to the user (#1976, #1978)
* Apply avoiding IBD logic from patch10 to p2p v4 IBD handling (#1979)
Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)

View File

@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad
## Requirements
Go 1.18 or later.
Go 1.16 or later.
## Installation

View File

@@ -31,13 +31,10 @@ var commandTypes = []reflect.Type{
reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntryRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntriesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntriesByAddressesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBalanceByAddressRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetCoinSupplyRequest{}),
reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),

View File

@@ -1,11 +1,13 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.18-alpine AS build
FROM golang:1.16-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad
RUN apk add --no-cache curl git openssh binutils gcc musl-dev
RUN go get -u golang.org/x/lint/golint \
honnef.co/go/tools/cmd/staticcheck
COPY go.mod .
COPY go.sum .
@@ -16,6 +18,10 @@ COPY . .
WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspactl
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN staticcheck -checks SA4006 ./...
RUN GOOS=linux go build -a -installsuffix cgo -o kaspactl .
# --- multistage docker build: stage #2: runtime image

View File

@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad
## Requirements
Go 1.18 or later.
Go 1.16 or later.
## Installation

View File

@@ -13,8 +13,8 @@ const minerTimeout = 10 * time.Second
type minerClient struct {
*rpcclient.RPCClient
cfg *configFlags
newBlockTemplateNotificationChan chan struct{}
cfg *configFlags
blockAddedNotificationChan chan struct{}
}
func (mc *minerClient) connect() error {
@@ -30,14 +30,14 @@ func (mc *minerClient) connect() error {
mc.SetTimeout(minerTimeout)
mc.SetLogger(backendLog, logger.LevelTrace)
err = mc.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) {
err = mc.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
select {
case mc.newBlockTemplateNotificationChan <- struct{}{}:
case mc.blockAddedNotificationChan <- struct{}{}:
default:
}
})
if err != nil {
return errors.Wrapf(err, "error requesting new-block-template notifications")
return errors.Wrapf(err, "error requesting block-added notifications")
}
log.Infof("Connected to %s", rpcAddress)
@@ -47,8 +47,8 @@ func (mc *minerClient) connect() error {
func newMinerClient(cfg *configFlags) (*minerClient, error) {
minerClient := &minerClient{
cfg: cfg,
newBlockTemplateNotificationChan: make(chan struct{}),
cfg: cfg,
blockAddedNotificationChan: make(chan struct{}),
}
err := minerClient.connect()

View File

@@ -1,11 +1,13 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.18-alpine AS build
FROM golang:1.16-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad
RUN apk add --no-cache curl git openssh binutils gcc musl-dev
RUN go get -u golang.org/x/lint/golint \
honnef.co/go/tools/cmd/staticcheck
COPY go.mod .
COPY go.sum .
@@ -15,6 +17,11 @@ RUN go mod download
COPY . .
WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN staticcheck -checks SA4006 ./...
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
# --- multistage docker build: stage #2: runtime image

View File

@@ -23,7 +23,8 @@ func main() {
cfg, err := parseConfig()
if err != nil {
printErrorAndExit(errors.Errorf("Error parsing command-line arguments: %s", err))
fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
os.Exit(1)
}
defer backendLog.Close()
@@ -43,7 +44,7 @@ func main() {
miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
if err != nil {
printErrorAndExit(errors.Errorf("Error decoding mining address: %s", err))
panic(errors.Wrap(err, "error decoding mining address"))
}
doneChan := make(chan struct{})
@@ -60,8 +61,3 @@ func main() {
case <-interrupt:
}
}
func printErrorAndExit(err error) {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}

View File

@@ -2,7 +2,6 @@ package main
import (
nativeerrors "errors"
"github.com/kaspanet/kaspad/version"
"math/rand"
"sync/atomic"
"time"
@@ -188,7 +187,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S
func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) {
getBlockTemplate := func() {
template, err := client.GetBlockTemplate(miningAddr.String(), "kaspaminer-"+version.Version())
template, err := client.GetBlockTemplate(miningAddr.String())
if nativeerrors.Is(err, router.ErrTimeout) {
log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
reconnectErr := client.Reconnect()
@@ -218,7 +217,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan er
ticker := time.NewTicker(tickerTime)
for {
select {
case <-client.newBlockTemplateNotificationChan:
case <-client.blockAddedNotificationChan:
getBlockTemplate()
ticker.Reset(tickerTime)
case <-ticker.C:
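
This loop combines push and pull: a notification triggers an immediate template refresh and resets the ticker, while the ticker alone still guarantees a periodic refresh. A self-contained sketch of the event-plus-ticker loop:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan struct{}, 1)
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	events <- struct{}{} // simulate one incoming notification

	for i := 0; i < 2; i++ {
		select {
		case <-events:
			fmt.Println("refresh: notification")
			ticker.Reset(50 * time.Millisecond) // avoid a redundant tick
		case <-ticker.C:
			fmt.Println("refresh: periodic fallback")
		}
	}
}
```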

View File

@@ -3,12 +3,19 @@ package main
import (
"context"
"fmt"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
func formatKas(amount uint64) string {
res := " "
if amount > 0 {
res = fmt.Sprintf("%19.8f", float64(amount)/constants.SompiPerKaspa)
}
return res
}
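
formatKas right-aligns a sompi amount as KAS with eight decimal places; constants.SompiPerKaspa is 100,000,000, so 150,000,000 sompi renders as 1.50000000 padded to 19 characters. A runnable sketch mirroring the helper (the constant is inlined, and the blank-amount padding is assumed to match the numeric field width):

```go
package main

import "fmt"

const sompiPerKaspa = 100_000_000 // mirrors constants.SompiPerKaspa

// formatKas renders an amount of sompi as KAS with 8 decimals;
// zero amounts render as blank padding of the same width.
func formatKas(amount uint64) string {
	res := "                   " // 19 spaces, assumed to match %19.8f
	if amount > 0 {
		res = fmt.Sprintf("%19.8f", float64(amount)/sompiPerKaspa)
	}
	return res
}

func main() {
	fmt.Printf("%q\n", formatKas(150_000_000)) // "         1.50000000"
}
```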
func balance(conf *balanceConfig) error {
daemonClient, tearDown, err := client.Connect(conf.DaemonAddress)
if err != nil {
@@ -32,12 +39,12 @@ func balance(conf *balanceConfig) error {
println("Address Available Pending")
println("-----------------------------------------------------------------------------------------------------------")
for _, addressBalance := range response.AddressBalances {
fmt.Printf("%s %s %s\n", addressBalance.Address, utils.FormatKas(addressBalance.Available), utils.FormatKas(addressBalance.Pending))
fmt.Printf("%s %s %s\n", addressBalance.Address, formatKas(addressBalance.Available), formatKas(addressBalance.Pending))
}
println("-----------------------------------------------------------------------------------------------------------")
print(" ")
}
fmt.Printf("Total balance, KAS %s %s%s\n", utils.FormatKas(response.Available), utils.FormatKas(response.Pending), pendingSuffix)
fmt.Printf("Total balance, KAS %s %s%s\n", formatKas(response.Available), formatKas(response.Pending), pendingSuffix)
return nil
}

Some files were not shown because too many files have changed in this diff