Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 19:22:53 +00:00

Compare commits: v0.12.0 ... missing-sp (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 7078dada5c |  |

.github/workflows/deploy.yaml (vendored, 7 lines changed)
@@ -19,11 +19,16 @@ jobs:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v2

+      # Increase the pagefile size on Windows to avoid running out of memory
+      - name: Increase pagefile size on Windows
+        if: runner.os == 'Windows'
+        run: powershell -command .github\workflows\SetPageFileSize.ps1
+
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18
+          go-version: 1.16

       - name: Build on Linux
         if: runner.os == 'Linux'
.github/workflows/race.yaml (vendored, 2 lines changed)

@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18
+          go-version: 1.16

       - name: Set scheduled branch name
         shell: bash
.github/workflows/tests.yaml (vendored, 6 lines changed)

@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18
+          go-version: 1.16

      # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules

@@ -58,7 +58,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18
+          go-version: 1.16

       - name: Checkout
         uses: actions/checkout@v2

@@ -86,7 +86,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18
+          go-version: 1.16

       - name: Delete the stability tests from coverage
         run: rm -r stability-tests
@@ -12,7 +12,8 @@ If you want to make a big change it's better to discuss it first by opening an issue.

 ## Pull Request process

-Any pull request should be opened against the development branch `dev`.
+Any pull request should be opened against the development branch of the target version. The development branch format is
+as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.

 All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
 submitting your PR.
@@ -1,13 +1,16 @@
 Kaspad
 ====
+Warning: This is pre-alpha software. There's no guarantee anything works.
+====

 [](https://choosealicense.com/licenses/isc/)
 [](http://godoc.org/github.com/kaspanet/kaspad)

 Kaspad is the reference full node Kaspa implementation written in Go (golang).

-This project is currently under active development and is in Beta state.
+This project is currently under active development and is in a pre-Alpha state.
+Some things still don't work and APIs are far from finalized. The code is provided for reference only.

 ## What is kaspa

@@ -15,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations

 ## Requirements

-Go 1.18 or later.
+Go 1.16 or later.

 ## Installation
@@ -87,7 +87,6 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
     if app.cfg.Profile != "" {
         profiling.Start(app.cfg.Profile, log)
     }
-    profiling.TrackHeap(app.cfg.AppDir, log)

     // Return now if an interrupt signal was triggered.
     if signal.InterruptRequested(interrupt) {
@@ -436,10 +436,10 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {

 // BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData
 func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData {
-    daaWindow := make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow))
+    daaWindow := make([]*externalapi.TrustedDataDataDAABlock, len(block.DAAWindow))
     for i, daaBlock := range block.DAAWindow {
-        daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{
-            Header:       BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header),
+        daaWindow[i] = &externalapi.TrustedDataDataDAABlock{
+            Block:        MsgBlockToDomainBlock(daaBlock.Block),
             GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
         }
     }
@@ -454,27 +454,12 @@ func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrusted

     return &externalapi.BlockWithTrustedData{
         Block:        MsgBlockToDomainBlock(block.Block),
         DAAScore:     block.DAAScore,
         DAAWindow:    daaWindow,
         GHOSTDAGData: ghostdagData,
     }
 }

-// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader
-func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader {
-    return &externalapi.TrustedDataDataDAAHeader{
-        Header:       BlockHeaderToDomainBlockHeader(daaBlock.Header),
-        GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
-    }
-}
-
-// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair
-func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair {
-    return &externalapi.BlockGHOSTDAGDataHashPair{
-        Hash:         datum.Hash,
-        GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
-    }
-}

 func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData {
     bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes))
     for _, pair := range data.BluesAnticoneSizes {
@@ -515,9 +500,7 @@ func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWi
     daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow))
     for i, daaBlock := range block.DAAWindow {
         daaWindow[i] = &TrustedDataDataDAABlock{
-            Block: &MsgBlock{
-                Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header),
-            },
+            Block:        DomainBlockToMsgBlock(daaBlock.Block),
             GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
         }
     }
@@ -532,41 +515,7 @@ func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWi

     return &MsgBlockWithTrustedData{
         Block:        DomainBlockToMsgBlock(block.Block),
         DAAScore:     block.Block.Header.DAAScore(),
         DAAWindow:    daaWindow,
         GHOSTDAGData: ghostdagData,
     }
 }

-// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices
-// to *MsgBlockWithTrustedDataV4
-func DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 {
-    return &MsgBlockWithTrustedDataV4{
-        Block:               DomainBlockToMsgBlock(block),
-        DAAWindowIndices:    daaWindowIndices,
-        GHOSTDAGDataIndices: ghostdagDataIndices,
-    }
-}
-
-// DomainTrustedDataToTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
-func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData {
-    daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow))
-    for i, daaBlock := range domainDAAWindow {
-        daaWindow[i] = &TrustedDataDAAHeader{
-            Header:       DomainBlockHeaderToBlockHeader(daaBlock.Header),
-            GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
-        }
-    }
-
-    ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData))
-    for i, datum := range domainGHOSTDAGData {
-        ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
-            Hash:         datum.Hash,
-            GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
-        }
-    }
-
-    return &MsgTrustedData{
-        DAAScore:     block.DAAScore,
-        DAAWindow:    daaWindow,
-        GHOSTDAGData: ghostdagData,
-    }
@@ -38,10 +38,6 @@ type RPCError struct {
     Message string
 }

-func (err RPCError) Error() string {
-    return err.Message
-}
-
 // RPCErrorf formats according to a format specifier and returns the string
 // as an RPCError.
 func RPCErrorf(format string, args ...interface{}) *RPCError {
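
The Error() method deleted above is what made RPCError satisfy Go's built-in error interface; on the missing-sp side an RPCError is a plain message container again. A minimal self-contained sketch of that pattern, with the types reduced to only what the hunk shows (the main function is illustrative, not from the repository):

package main

import "fmt"

// RPCError mirrors the struct in the hunk above: a message wrapper used
// as the Error field of RPC response appmessages.
type RPCError struct {
    Message string
}

// Error makes RPCError satisfy Go's error interface. This is the method
// the diff removes on the missing-sp side.
func (err RPCError) Error() string {
    return err.Message
}

// RPCErrorf formats according to a format specifier and returns the result
// as an RPCError, matching the constructor kept by the diff.
func RPCErrorf(format string, args ...interface{}) *RPCError {
    return &RPCError{Message: fmt.Sprintf(format, args...)}
}

func main() {
    rpcErr := RPCErrorf("couldn't parse address %q", "some-input")
    fmt.Println(rpcErr.Message) // prints the formatted message
}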
@@ -66,13 +66,6 @@ const (
     CmdPruningPoints
     CmdRequestPruningPointProof
     CmdPruningPointProof
-    CmdReady
-    CmdTrustedData
-    CmdBlockWithTrustedDataV4
-    CmdRequestNextPruningPointAndItsAnticoneBlocks
-    CmdRequestIBDChainBlockLocator
-    CmdIBDChainBlockLocator
-    CmdRequestAnticone

     // rpc
     CmdGetCurrentNetworkRequestMessage

@@ -131,8 +124,6 @@ const (
     CmdStopNotifyingUTXOsChangedResponseMessage
     CmdGetUTXOsByAddressesRequestMessage
     CmdGetUTXOsByAddressesResponseMessage
-    CmdGetBalanceByAddressRequestMessage
-    CmdGetBalanceByAddressResponseMessage
     CmdGetVirtualSelectedParentBlueScoreRequestMessage
     CmdGetVirtualSelectedParentBlueScoreResponseMessage
     CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage

@@ -154,11 +145,6 @@ const (
     CmdNotifyVirtualDaaScoreChangedRequestMessage
     CmdNotifyVirtualDaaScoreChangedResponseMessage
    CmdVirtualDaaScoreChangedNotificationMessage
-    CmdGetBalancesByAddressesRequestMessage
-    CmdGetBalancesByAddressesResponseMessage
-    CmdNotifyNewBlockTemplateRequestMessage
-    CmdNotifyNewBlockTemplateResponseMessage
-    CmdNewBlockTemplateNotificationMessage
 )

 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -199,13 +185,6 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
     CmdPruningPoints:            "PruningPoints",
     CmdRequestPruningPointProof: "RequestPruningPointProof",
     CmdPruningPointProof:        "PruningPointProof",
-    CmdReady:                    "Ready",
-    CmdTrustedData:              "TrustedData",
-    CmdBlockWithTrustedDataV4:   "BlockWithTrustedDataV4",
-    CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
-    CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator",
-    CmdIBDChainBlockLocator:     "IBDChainBlockLocator",
-    CmdRequestAnticone:          "RequestAnticone",
 }

 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -264,8 +243,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
     CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
     CmdGetUTXOsByAddressesRequestMessage:        "GetUTXOsByAddressesRequest",
     CmdGetUTXOsByAddressesResponseMessage:       "GetUTXOsByAddressesResponse",
-    CmdGetBalanceByAddressRequestMessage:        "GetBalanceByAddressRequest",
-    CmdGetBalanceByAddressResponseMessage:       "GetBalancesByAddressResponse",
     CmdGetVirtualSelectedParentBlueScoreRequestMessage:  "GetVirtualSelectedParentBlueScoreRequest",
     CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
     CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",

@@ -287,11 +264,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
     CmdNotifyVirtualDaaScoreChangedRequestMessage:  "NotifyVirtualDaaScoreChangedRequest",
     CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
     CmdVirtualDaaScoreChangedNotificationMessage:   "VirtualDaaScoreChangedNotification",
-    CmdGetBalancesByAddressesRequestMessage:        "GetBalancesByAddressesRequest",
-    CmdGetBalancesByAddressesResponseMessage:       "GetBalancesByAddressesResponse",
-    CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
-    CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
-    CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
 }

 // Message is an interface that describes a kaspa message. A type that
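
The two maps above exist so that numeric wire commands can be logged by a human-readable name. A self-contained sketch of that lookup pattern; the MessageCommand values here are invented stand-ins for illustration, the real codes come from the iota const block in the diff:

package main

import "fmt"

// MessageCommand stands in for appmessage.MessageCommand, a numeric wire code.
type MessageCommand uint32

// Illustrative values only; not the repository's actual numbering.
const (
    CmdPruningPoints MessageCommand = iota
    CmdRequestPruningPointProof
)

var protocolMessageCommandToString = map[MessageCommand]string{
    CmdPruningPoints:            "PruningPoints",
    CmdRequestPruningPointProof: "RequestPruningPointProof",
}

func main() {
    cmd := CmdRequestPruningPointProof
    // Log a readable name instead of a raw code:
    fmt.Printf("received %s (code %d)\n", protocolMessageCommandToString[cmd], cmd)
}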
@@ -18,7 +18,7 @@ import (

 // TestBlock tests the MsgBlock API.
 func TestBlock(t *testing.T) {
-    pver := uint32(4)
+    pver := ProtocolVersion

     // Block 1 header.
     parents := blockOne.Header.Parents
@@ -1,20 +0,0 @@
-package appmessage
-
-// MsgBlockWithTrustedDataV4 represents a kaspa BlockWithTrustedDataV4 message
-type MsgBlockWithTrustedDataV4 struct {
-    baseMessage
-
-    Block               *MsgBlock
-    DAAWindowIndices    []uint64
-    GHOSTDAGDataIndices []uint64
-}
-
-// Command returns the protocol command string for the message
-func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand {
-    return CmdBlockWithTrustedDataV4
-}
-
-// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4.
-func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
-    return &MsgBlockWithTrustedDataV4{}
-}
@@ -1,27 +0,0 @@
-package appmessage
-
-import (
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
-// locator message. It is used to find the blockLocator of a peer that is
-// syncing with you.
-type MsgIBDChainBlockLocator struct {
-    baseMessage
-    BlockLocatorHashes []*externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
-    return CmdIBDChainBlockLocator
-}
-
-// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
-// the Message interface. See MsgBlockLocator for details.
-func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
-    return &MsgIBDChainBlockLocator{
-        BlockLocatorHashes: locatorHashes,
-    }
-}
@@ -1,33 +0,0 @@
-// Copyright (c) 2013-2016 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package appmessage
-
-import (
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgRequestAnticone implements the Message interface and represents a kaspa
-// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
-type MsgRequestAnticone struct {
-    baseMessage
-    BlockHash   *externalapi.DomainHash
-    ContextHash *externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestAnticone) Command() MessageCommand {
-    return CmdRequestAnticone
-}
-
-// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
-// Message interface using the passed parameters and defaults for the remaining
-// fields.
-func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
-    return &MsgRequestAnticone{
-        BlockHash:   blockHash,
-        ContextHash: contextHash,
-    }
-}
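
The deleted message's comment describes requesting the set past(ContextHash) intersected with anticone(BlockHash). For readers unfamiliar with the term, the anticone of a block is the set of blocks that are neither its ancestors nor its descendants in the DAG. A toy self-contained sketch of that definition, with hashes reduced to strings; this is not kaspad's implementation:

package main

import "fmt"

// parents maps each block to its parent blocks in a toy DAG.
var parents = map[string][]string{
    "genesis": {},
    "a":       {"genesis"},
    "b":       {"genesis"},
    "c":       {"a", "b"},
}

// past returns all ancestors of block (excluding block itself).
func past(block string) map[string]bool {
    result := map[string]bool{}
    var visit func(string)
    visit = func(b string) {
        for _, p := range parents[b] {
            if !result[p] {
                result[p] = true
                visit(p)
            }
        }
    }
    visit(block)
    return result
}

// anticone returns blocks that are neither in past(block) nor in its
// future nor block itself, i.e. blocks parallel to it in the DAG.
func anticone(block string, all []string) []string {
    inPast := past(block)
    var result []string
    for _, candidate := range all {
        if candidate == block || inPast[candidate] {
            continue
        }
        if past(candidate)[block] { // block is an ancestor, so candidate is in its future
            continue
        }
        result = append(result, candidate)
    }
    return result
}

func main() {
    all := []string{"genesis", "a", "b", "c"}
    fmt.Println(anticone("a", all)) // [b]: b is parallel to a
}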
@@ -1,31 +0,0 @@
-package appmessage
-
-import (
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
-// IBDRequestChainBlockLocator message. It is used to request a block locator between low
-// and high hash.
-// The locator is returned via a locator message (MsgIBDChainBlockLocator).
-type MsgRequestIBDChainBlockLocator struct {
-    baseMessage
-    HighHash *externalapi.DomainHash
-    LowHash  *externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
-    return CmdRequestIBDChainBlockLocator
-}
-
-// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
-// Message interface using the passed parameters and defaults for the remaining
-// fields.
-func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
-    return &MsgRequestIBDChainBlockLocator{
-        HighHash: highHash,
-        LowHash:  lowHash,
-    }
-}
@@ -1,22 +0,0 @@
-package appmessage
-
-// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
-// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
-// more blocks from the pruning anticone.
-//
-// This message has no payload.
-type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
-    baseMessage
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
-    return CmdRequestNextPruningPointAndItsAnticoneBlocks
-}
-
-// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
-// Message interface.
-func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
-    return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
-}
@@ -1,25 +0,0 @@
-package appmessage
-
-// MsgTrustedData represents a kaspa TrustedData message
-type MsgTrustedData struct {
-    baseMessage
-
-    DAAWindow    []*TrustedDataDAAHeader
-    GHOSTDAGData []*BlockGHOSTDAGDataHashPair
-}
-
-// Command returns the protocol command string for the message
-func (msg *MsgTrustedData) Command() MessageCommand {
-    return CmdTrustedData
-}
-
-// NewMsgTrustedData returns a new MsgTrustedData.
-func NewMsgTrustedData() *MsgTrustedData {
-    return &MsgTrustedData{}
-}
-
-// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader
-type TrustedDataDAAHeader struct {
-    Header       *MsgBlockHeader
-    GHOSTDAGData *BlockGHOSTDAGData
-}
@@ -22,7 +22,7 @@ import (

 // TestTx tests the MsgTx API.
 func TestTx(t *testing.T) {
-    pver := uint32(4)
+    pver := ProtocolVersion

     txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
     txID, err := transactionid.FromString(txIDStr)
@@ -82,12 +82,12 @@ func (msg *MsgVersion) Command() MessageCommand {
 // Message interface using the passed parameters and defaults for the remaining
 // fields.
 func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
-    subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion {
+    subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {

     // Limit the timestamp to one millisecond precision since the protocol
     // doesn't support better.
     return &MsgVersion{
-        ProtocolVersion: protocolVersion,
+        ProtocolVersion: ProtocolVersion,
         Network:         network,
         Services:        0,
         Timestamp:       mstime.Now(),

@@ -15,7 +15,7 @@ import (

 // TestVersion tests the MsgVersion API.
 func TestVersion(t *testing.T) {
-    pver := uint32(4)
+    pver := ProtocolVersion

     // Create version message data.
     tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}

@@ -26,7 +26,7 @@ func TestVersion(t *testing.T) {
     }

     // Ensure we get the correct data back out.
-    msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4)
+    msg := NewMsgVersion(me, generatedID, "mainnet", nil)
     if msg.ProtocolVersion != pver {
         t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
             msg.ProtocolVersion, pver)
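
With the narrowed signature above, callers can no longer choose a protocol version per message: every MsgVersion is pinned to the package-level constant. A self-contained sketch of that pattern, with the types reduced to stand-ins (the real constructor also fills Services, Timestamp and the other fields shown in the hunk):

package main

import "fmt"

// ProtocolVersion mirrors the package-level constant the diff adds.
const ProtocolVersion uint32 = 3

// MsgVersion is a stand-in for appmessage.MsgVersion with only the
// fields this sketch needs.
type MsgVersion struct {
    ProtocolVersion uint32
    Network         string
}

// NewMsgVersion mirrors the narrowed constructor: no protocolVersion
// parameter, the constant is used unconditionally.
func NewMsgVersion(network string) *MsgVersion {
    return &MsgVersion{ProtocolVersion: ProtocolVersion, Network: network}
}

func main() {
    msg := NewMsgVersion("mainnet")
    fmt.Println(msg.ProtocolVersion == ProtocolVersion) // always true now
}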
@@ -5,9 +5,8 @@
 package appmessage

 import (
-    "net"
-
     "github.com/kaspanet/kaspad/util/mstime"
+    "net"
 )

 // NetAddress defines information about a peer on the network including the time

@@ -58,7 +57,3 @@ func NewNetAddressTimestamp(
 func NewNetAddress(addr *net.TCPAddr) *NetAddress {
     return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
 }
-
-func (na NetAddress) String() string {
-    return na.TCPAddress().String()
-}
@@ -1,22 +0,0 @@
-package appmessage
-
-// MsgReady implements the Message interface and represents a kaspa
-// Ready message. It is used to notify that the peer is ready to receive
-// messages.
-//
-// This message has no payload.
-type MsgReady struct {
-    baseMessage
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgReady) Command() MessageCommand {
-    return CmdReady
-}
-
-// NewMsgReady returns a new kaspa Ready message that conforms to the
-// Message interface.
-func NewMsgReady() *MsgReady {
-    return &MsgReady{}
-}
@@ -11,6 +11,9 @@ import (
 )

 const (
+    // ProtocolVersion is the latest protocol version this package supports.
+    ProtocolVersion uint32 = 3
+
     // DefaultServices describes the default services that are supported by
     // the server.
     DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
@@ -1,41 +0,0 @@
-package appmessage
-
-// GetBalanceByAddressRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type GetBalanceByAddressRequestMessage struct {
-    baseMessage
-    Address string
-}
-
-// Command returns the protocol command string for the message
-func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand {
-    return CmdGetBalanceByAddressRequestMessage
-}
-
-// NewGetBalanceByAddressRequest returns a instance of the message
-func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage {
-    return &GetBalanceByAddressRequestMessage{
-        Address: address,
-    }
-}
-
-// GetBalanceByAddressResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type GetBalanceByAddressResponseMessage struct {
-    baseMessage
-    Balance uint64
-
-    Error *RPCError
-}
-
-// Command returns the protocol command string for the message
-func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand {
-    return CmdGetBalanceByAddressResponseMessage
-}
-
-// NewGetBalanceByAddressResponse returns an instance of the message
-func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage {
-    return &GetBalanceByAddressResponseMessage{
-        Balance: Balance,
-    }
-}
@@ -1,47 +0,0 @@
-package appmessage
-
-// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type GetBalancesByAddressesRequestMessage struct {
-    baseMessage
-    Addresses []string
-}
-
-// Command returns the protocol command string for the message
-func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
-    return CmdGetBalancesByAddressesRequestMessage
-}
-
-// NewGetBalancesByAddressesRequest returns a instance of the message
-func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
-    return &GetBalancesByAddressesRequestMessage{
-        Addresses: addresses,
-    }
-}
-
-// BalancesByAddressesEntry represents the balance of some address
-type BalancesByAddressesEntry struct {
-    Address string
-    Balance uint64
-}
-
-// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type GetBalancesByAddressesResponseMessage struct {
-    baseMessage
-    Entries []*BalancesByAddressesEntry
-
-    Error *RPCError
-}
-
-// Command returns the protocol command string for the message
-func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
-    return CmdGetBalancesByAddressesResponseMessage
-}
-
-// NewGetBalancesByAddressesResponse returns an instance of the message
-func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
-    return &GetBalancesByAddressesResponseMessage{
-        Entries: entries,
-    }
-}
@@ -5,7 +5,6 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
     baseMessage
     PayAddress string
-    ExtraData  string
 }

 // Command returns the protocol command string for the message

@@ -14,10 +13,9 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }

 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
     return &GetBlockTemplateRequestMessage{
         PayAddress: payAddress,
-        ExtraData:  extraData,
     }
 }
@@ -1,50 +0,0 @@
-package appmessage
-
-// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type NotifyNewBlockTemplateRequestMessage struct {
-    baseMessage
-}
-
-// Command returns the protocol command string for the message
-func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
-    return CmdNotifyNewBlockTemplateRequestMessage
-}
-
-// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
-func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
-    return &NotifyNewBlockTemplateRequestMessage{}
-}
-
-// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type NotifyNewBlockTemplateResponseMessage struct {
-    baseMessage
-    Error *RPCError
-}
-
-// Command returns the protocol command string for the message
-func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
-    return CmdNotifyNewBlockTemplateResponseMessage
-}
-
-// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
-func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
-    return &NotifyNewBlockTemplateResponseMessage{}
-}
-
-// NewBlockTemplateNotificationMessage is an appmessage corresponding to
-// its respective RPC message
-type NewBlockTemplateNotificationMessage struct {
-    baseMessage
-}
-
-// Command returns the protocol command string for the message
-func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
-    return CmdNewBlockTemplateNotificationMessage
-}
-
-// NewNewBlockTemplateNotificationMessage returns an instance of the message
-func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
-    return &NewBlockTemplateNotificationMessage{}
-}
@@ -4,8 +4,7 @@ package appmessage
 // its respective RPC message
 type SubmitBlockRequestMessage struct {
     baseMessage
-    Block             *RPCBlock
-    AllowNonDAABlocks bool
+    Block *RPCBlock
 }

 // Command returns the protocol command string for the message

@@ -14,10 +13,9 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
 }

 // NewSubmitBlockRequestMessage returns a instance of the message
-func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage {
+func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
     return &SubmitBlockRequestMessage{
-        Block:             block,
-        AllowNonDAABlocks: allowNonDAABlocks,
+        Block: block,
     }
 }
@@ -92,14 +90,11 @@ type RPCBlockLevelParents struct {

 // RPCBlockVerboseData holds verbose data about a block
 type RPCBlockVerboseData struct {
-    Hash                string
-    Difficulty          float64
-    SelectedParentHash  string
-    TransactionIDs      []string
-    IsHeaderOnly        bool
-    BlueScore           uint64
-    ChildrenHashes      []string
-    MergeSetBluesHashes []string
-    MergeSetRedsHashes  []string
-    IsChainBlock        bool
+    Hash               string
+    Difficulty         float64
+    SelectedParentHash string
+    TransactionIDs     []string
+    IsHeaderOnly       bool
+    BlueScore          uint64
+    ChildrenHashes     []string
 }
@@ -102,7 +102,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,

     var utxoIndex *utxoindex.UTXOIndex
     if cfg.UTXOIndex {
-        utxoIndex, err = utxoindex.New(domain, db)
+        utxoIndex, err = utxoindex.New(domain.Consensus(), db)
         if err != nil {
             return nil, err
         }

@@ -152,8 +152,6 @@ func setupRPC(
         utxoIndex,
         shutDownChan,
     )
-    protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
-    protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
     protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
     protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
@@ -1,8 +1,6 @@
 package common

 import (
-    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
-    routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "time"

     "github.com/pkg/errors"

@@ -14,14 +12,3 @@ const DefaultTimeout = 120 * time.Second

 // ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
 var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
-
-type flowExecuteFunc func(peer *peerpkg.Peer)
-
-// Flow is a a data structure that is used in order to associate a p2p flow to some route in a router.
-type Flow struct {
-    Name        string
-    ExecuteFunc flowExecuteFunc
-}
-
-// FlowInitializeFunc is a function that is used in order to initialize a flow
-type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error
@@ -11,17 +11,18 @@ import (
     "github.com/pkg/errors"

     "github.com/kaspanet/kaspad/app/appmessage"
+    "github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
 )

 // OnNewBlock updates the mempool after a new block arrival, and
 // relays newly unorphaned transactions and possibly rebroadcast
 // manually added transactions when not in IBD.
 func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
-    virtualChangeSet *externalapi.VirtualChangeSet) error {
+    blockInsertionResult *externalapi.BlockInsertionResult) error {

     hash := consensushashing.BlockHash(block)
-    log.Tracef("OnNewBlock start for block %s", hash)
-    defer log.Tracef("OnNewBlock end for block %s", hash)
+    log.Debugf("OnNewBlock start for block %s", hash)
+    defer log.Debugf("OnNewBlock end for block %s", hash)

     unorphaningResults, err := f.UnorphanBlocks(block)
     if err != nil {

@@ -31,10 +32,10 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
     log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))

     newBlocks := []*externalapi.DomainBlock{block}
-    newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
+    newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
     for _, unorphaningResult := range unorphaningResults {
         newBlocks = append(newBlocks, unorphaningResult.block)
-        newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
+        newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
     }

     allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)

@@ -48,8 +49,8 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,

     if f.onBlockAddedToDAGHandler != nil {
         log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
-        virtualChangeSet = newVirtualChangeSets[i]
-        err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
+        blockInsertionResult = newBlockInsertionResults[i]
+        err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
         if err != nil {
             return err
         }
@@ -59,24 +60,6 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
     return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
 }

-// OnVirtualChange calls the handler function whenever the virtual block changes.
-func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
-    if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
-        return f.onVirtualChangeHandler(virtualChangeSet)
-    }
-
-    return nil
-}
-
-// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
-func (f *FlowContext) OnNewBlockTemplate() error {
-    if f.onNewBlockTemplateHandler != nil {
-        return f.onNewBlockTemplateHandler()
-    }
-
-    return nil
-}
-
 // OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
 // resets due to pruning point change via IBD.
 func (f *FlowContext) OnPruningPointUTXOSetOverride() error {

@@ -117,7 +100,7 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(

 // SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
 // data about requested blocks between different peers.
-func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks {
+func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
     return f.sharedRequestedBlocks
 }

@@ -127,18 +110,14 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
         return protocolerrors.Errorf(false, "cannot add header only block")
     }

-    virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
+    blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
     if err != nil {
         if errors.As(err, &ruleerrors.RuleError{}) {
             log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
         }
         return err
     }
-    err = f.OnNewBlockTemplate()
-    if err != nil {
-        return err
-    }
-    err = f.OnNewBlock(block, virtualChangeSet)
+    err = f.OnNewBlock(block, blockInsertionResult)
     if err != nil {
         return err
     }

@@ -163,7 +142,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
         return false
     }
     f.ibdPeer = ibdPeer
-    log.Infof("IBD started with peer %s", ibdPeer)
+    log.Infof("IBD started")

     return true
 }
@@ -2,7 +2,6 @@ package flowcontext

 import (
     "errors"
-    "strings"
     "sync/atomic"

     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"

@@ -10,11 +9,6 @@ import (
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 )

-var (
-    // ErrPingTimeout signifies that a ping operation timed out.
-    ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
-)
-
 // HandleError handles an error from a flow,
 // It sends the error to errChan if isStopping == 0 and increments isStopping
 //

@@ -27,15 +21,8 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
         if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
             panic(err)
         }
-        if errors.Is(err, ErrPingTimeout) {
-            // Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
-            log.Errorf("error from %s: %s", flowName, err)
-        } else {
-            // Explain to the user that this is not a panic, but only a protocol error with a specific peer
-            logFrame := strings.Repeat("=", 52)
-            log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
-                flowName, logFrame, err, logFrame)
-        }
+        log.Errorf("error from %s: %s", flowName, err)
     }

     if atomic.AddUint32(isStopping, 1) == 1 {
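
HandleError's isStopping counter ensures that only the first failing flow of a peer connection reports to the error channel. A self-contained sketch of that first-caller-wins idiom, with names simplified from the diff:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// reportOnce sends err to errChan only if this is the first caller,
// mirroring HandleError's use of atomic.AddUint32 on isStopping.
func reportOnce(err error, isStopping *uint32, errChan chan<- error) {
    if atomic.AddUint32(isStopping, 1) == 1 {
        errChan <- err
    }
}

func main() {
    var isStopping uint32
    errChan := make(chan error, 1)

    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            reportOnce(fmt.Errorf("flow %d failed", n), &isStopping, errChan)
        }(i)
    }
    wg.Wait()
    fmt.Println(<-errChan) // exactly one error was reported
}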
@@ -10,6 +10,8 @@ import (
     "github.com/kaspanet/kaspad/domain"

+    "github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
+    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
     peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
     "github.com/kaspanet/kaspad/infrastructure/config"
     "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"

@@ -20,13 +22,7 @@ import (

 // OnBlockAddedToDAGHandler is a handler function that's triggered
 // when a block is added to the DAG
-type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
-
-// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
-type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
-
-// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
-type OnNewBlockTemplateHandler func() error
+type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error

 // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
 // resets due to pruning point change via IBD.

@@ -47,17 +43,14 @@ type FlowContext struct {

     timeStarted int64

-    onVirtualChangeHandler               OnVirtualChangeHandler
     onBlockAddedToDAGHandler             OnBlockAddedToDAGHandler
-    onNewBlockTemplateHandler            OnNewBlockTemplateHandler
     onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
     onTransactionAddedToMempoolHandler   OnTransactionAddedToMempoolHandler

-    expectedDAAWindowDurationInMilliseconds int64
-    lastRebroadcastTime                     time.Time
-    sharedRequestedTransactions             *SharedRequestedTransactions
+    lastRebroadcastTime         time.Time
+    sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions

-    sharedRequestedBlocks *SharedRequestedBlocks
+    sharedRequestedBlocks *blockrelay.SharedRequestedBlocks

     ibdPeer      *peerpkg.Peer
     ibdPeerMutex sync.RWMutex
@@ -85,16 +78,14 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
         domain:                      domain,
         addressManager:              addressManager,
         connectionManager:           connectionManager,
-        sharedRequestedTransactions: NewSharedRequestedTransactions(),
-        sharedRequestedBlocks:       NewSharedRequestedBlocks(),
+        sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
+        sharedRequestedBlocks:       blockrelay.NewSharedRequestedBlocks(),
         peers:                       make(map[id.ID]*peerpkg.Peer),
         orphans:                     make(map[externalapi.DomainHash]*externalapi.DomainBlock),
         timeStarted:                 mstime.Now().UnixMilliseconds(),
         transactionIDsToPropagate:   []*externalapi.DomainTransactionID{},
         lastTransactionIDPropagationTime: time.Now(),
         shutdownChan:                make(chan struct{}),
-        expectedDAAWindowDurationInMilliseconds: cfg.NetParams().TargetTimePerBlock.Milliseconds() *
-            int64(cfg.NetParams().DifficultyAdjustmentWindowSize),
     }
 }

@@ -109,21 +100,11 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
     return f.shutdownChan
 }

-// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
-func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
-    f.onVirtualChangeHandler = onVirtualChangeHandler
-}
-
 // SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
 func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
     f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
 }

-// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
-func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
-    f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
-}
-
 // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
 func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
     f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
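
The Set*Handler methods above exist so the RPC layer can subscribe to protocol events (as setupRPC does earlier in this diff) without creating an import cycle between the packages. A reduced self-contained sketch of that callback-wiring pattern; the types here are stand-ins, not kaspad's:

package main

import "fmt"

// Block is a stand-in for *externalapi.DomainBlock.
type Block struct{ Name string }

// OnBlockAddedToDAGHandler mirrors the handler type kept by the diff.
type OnBlockAddedToDAGHandler func(block *Block) error

// FlowContext holds the registered callback, as in the diff.
type FlowContext struct {
    onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
}

// SetOnBlockAddedToDAGHandler registers the callback.
func (f *FlowContext) SetOnBlockAddedToDAGHandler(handler OnBlockAddedToDAGHandler) {
    f.onBlockAddedToDAGHandler = handler
}

// onBlockAdded invokes the callback if one was registered.
func (f *FlowContext) onBlockAdded(block *Block) error {
    if f.onBlockAddedToDAGHandler != nil {
        return f.onBlockAddedToDAGHandler(block)
    }
    return nil
}

func main() {
    f := &FlowContext{}
    // The RPC manager registers its notifier, as setupRPC does in the diff.
    f.SetOnBlockAddedToDAGHandler(func(block *Block) error {
        fmt.Println("notify RPC clients about", block.Name)
        return nil
    })
    _ = f.onBlockAdded(&Block{Name: "block-1"})
}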
@@ -17,8 +17,8 @@ const maxOrphans = 600

 // UnorphaningResult is the result of unorphaning a block
 type UnorphaningResult struct {
-    block            *externalapi.DomainBlock
-    virtualChangeSet *externalapi.VirtualChangeSet
+    block                *externalapi.DomainBlock
+    blockInsertionResult *externalapi.BlockInsertionResult
 }

 // AddOrphan adds the block to the orphan set

@@ -90,14 +90,14 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
         }
     }
     if canBeUnorphaned {
-        virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
+        blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
         if err != nil {
             return nil, err
         }
         if unorphaningSucceeded {
             unorphaningResults = append(unorphaningResults, &UnorphaningResult{
-                block:            orphanBlock,
-                virtualChangeSet: virtualChangeSet,
+                block:                orphanBlock,
+                blockInsertionResult: blockInsertionResult,
             })
             processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
         }

@@ -143,14 +143,14 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
     return childOrphans
 }

-func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
+func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
     orphanBlock, ok := f.orphans[orphanHash]
     if !ok {
         return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
     }
     delete(f.orphans, orphanHash)

-    virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
+    blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
     if err != nil {
         if errors.As(err, &ruleerrors.RuleError{}) {
             log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)

@@ -160,7 +160,7 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externa
     }

     log.Infof("Unorphaned block %s", orphanHash)
-    return virtualChangeSet, true, nil
+    return blockInsertionResult, true, nil
 }

 // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@@ -2,12 +2,21 @@ package flowcontext

 import "github.com/kaspanet/kaspad/util/mstime"

-// IsNearlySynced returns whether this node is considered synced or close to being synced. This info
-// is used to determine if it's ok to use a block template from this node for mining purposes.
-func (f *FlowContext) IsNearlySynced() (bool, error) {
+const (
+    maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
+)
+
+// ShouldMine returns whether it's ok to use block template from this node
+// for mining purposes.
+func (f *FlowContext) ShouldMine() (bool, error) {
     peers := f.Peers()
     if len(peers) == 0 {
-        log.Debugf("The node is not connected to peers, so IsNearlySynced returns false")
+        log.Debugf("The node is not connected, so ShouldMine returns false")
         return false, nil
     }

+    if f.IsIBDRunning() {
+        log.Debugf("IBD is running, so ShouldMine returns false")
+        return false, nil
+    }
+

@@ -16,25 +25,19 @@ func (f *FlowContext) IsNearlySynced() (bool, error) {
         return false, err
     }

-    if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
-        return false, nil
-    }
-
     virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
     if err != nil {
         return false, err
     }

     now := mstime.Now().UnixMilliseconds()
-    // As a heuristic, we allow the node to mine if he is likely to be within the current DAA window of fully synced nodes.
-    // Such blocks contribute to security by maintaining the current difficulty despite possibly being slightly out of sync.
-    if now-virtualSelectedParentHeader.TimeInMilliseconds() < f.expectedDAAWindowDurationInMilliseconds {
-        log.Debugf("The selected tip timestamp is recent (%d), so IsNearlySynced returns true",
+    if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
+        log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
             virtualSelectedParentHeader.TimeInMilliseconds())
         return true, nil
     }

-    log.Debugf("The selected tip timestamp is old (%d), so IsNearlySynced returns false",
+    log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
         virtualSelectedParentHeader.TimeInMilliseconds())
     return false, nil
 }
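
The two sides of this hunk gate mining differently: v0.12.0 allows mining while the selected tip is within one expected DAA-window duration of wall-clock time (TargetTimePerBlock multiplied by DifficultyAdjustmentWindowSize, per the FlowContext constructor earlier in this diff), while missing-sp uses a fixed one-hour cutoff. A worked sketch under assumed mainnet-like parameters; treat the 1-second block time and 2641-block window as assumptions, they are not read from kaspad's config here:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Assumed mainnet-like parameters.
    targetTimePerBlock := 1 * time.Second
    difficultyAdjustmentWindowSize := int64(2641)

    // v0.12.0 heuristic: one expected DAA window of wall-clock time.
    expectedDAAWindowDurationInMilliseconds :=
        targetTimePerBlock.Milliseconds() * difficultyAdjustmentWindowSize
    fmt.Printf("DAA-window cutoff: %d ms (~%v)\n",
        expectedDAAWindowDurationInMilliseconds,
        time.Duration(expectedDAAWindowDurationInMilliseconds)*time.Millisecond)
    // => 2641000 ms, roughly 44 minutes under these assumptions

    // missing-sp heuristic: a fixed one-hour cutoff.
    const maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000

    // Either way the test is: now - selectedTipTimestamp < cutoff.
    tipAge := (50 * time.Minute).Milliseconds()
    fmt.Println("50-minute-old tip, DAA-window rule allows mining:",
        tipAge < expectedDAAWindowDurationInMilliseconds) // false
    fmt.Println("50-minute-old tip, 1-hour rule allows mining:",
        tipAge < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds) // true
}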
@@ -4,6 +4,7 @@ import (
     "time"

     "github.com/kaspanet/kaspad/app/appmessage"
+    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 )

@@ -29,7 +30,7 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {

 // SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
 // data about requested transactions between different peers.
-func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions {
+func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
     return f.sharedRequestedTransactions
 }
@@ -13,21 +13,15 @@ func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.Domai
 }

 func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
-    for {
-        message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
-        if err != nil {
-            return nil, err
-        }
-
-        switch message := message.(type) {
-        case *appmessage.MsgInvRelayBlock:
-            flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
-        case *appmessage.MsgBlockLocator:
-            return message.BlockLocatorHashes, nil
-        default:
-            return nil,
-                protocolerrors.Errorf(true, "received unexpected message type. "+
-                    "expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
-        }
+    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
+    if err != nil {
+        return nil, err
+    }
+    msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
+    if !ok {
+        return nil,
+            protocolerrors.Errorf(true, "received unexpected message type. "+
+                "expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
     }
+    return msgBlockLocator.BlockLocatorHashes, nil
 }
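
The replacement helper dequeueIncomingMessageAndSkipInvs, whose body is not shown in this diff, appears to factor out the loop that queues stray block-inv messages while waiting for some other message type. A sketch of what such a helper plausibly does, reconstructed from the deleted loop above; types are simplified stand-ins and this is an assumption, not the repository's code:

package main

import "fmt"

// message is a stand-in for appmessage.Message.
type message interface{ Command() string }

type msgInvRelayBlock struct{ Hash string }

func (m *msgInvRelayBlock) Command() string { return "InvRelayBlock" }

type msgBlockLocator struct{ Hashes []string }

func (m *msgBlockLocator) Command() string { return "BlockLocator" }

type flow struct {
    incoming  chan message
    invsQueue []*msgInvRelayBlock
}

// dequeueIncomingMessageAndSkipInvs keeps reading messages, queuing any
// block invs for later handling, and returns the first non-inv message.
func (f *flow) dequeueIncomingMessageAndSkipInvs() message {
    for {
        msg := <-f.incoming
        inv, ok := msg.(*msgInvRelayBlock)
        if !ok {
            return msg
        }
        f.invsQueue = append(f.invsQueue, inv)
    }
}

func main() {
    f := &flow{incoming: make(chan message, 3)}
    f.incoming <- &msgInvRelayBlock{Hash: "h1"}
    f.incoming <- &msgInvRelayBlock{Hash: "h2"}
    f.incoming <- &msgBlockLocator{Hashes: []string{"tip"}}

    msg := f.dequeueIncomingMessageAndSkipInvs()
    fmt.Println(msg.Command(), "after queuing", len(f.invsQueue), "invs")
}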
@@ -33,7 +33,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
             return err
         }
         if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-            return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
+            return protocolerrors.Errorf(true, "block %s not found", hash)
         }
         block, err := context.Domain().Consensus().GetBlock(hash)
         if err != nil {
@@ -0,0 +1,95 @@
+package blockrelay
+
+import (
+    "github.com/kaspanet/kaspad/app/appmessage"
+    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
+    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
+    "github.com/kaspanet/kaspad/domain"
+    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
+    "runtime"
+    "sync/atomic"
+)
+
+// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
+type PruningPointAndItsAnticoneRequestsContext interface {
+    Domain() domain.Domain
+}
+
+var isBusy uint32
+
+// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
+// the pruning point and its anticone to the requesting peer.
+func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
+    outgoingRoute *router.Route, peer *peerpkg.Peer) error {
+
+    for {
+        err := func() error {
+            _, err := incomingRoute.Dequeue()
+            if err != nil {
+                return err
+            }
+
+            if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
+                return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
+            }
+            defer atomic.StoreUint32(&isBusy, 0)
+
+            log.Debugf("Got request for pruning point and its anticone from %s", peer)
+
+            pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
+            if err != nil {
+                return err
+            }
+
+            msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
+            for i, header := range pruningPointHeaders {
+                msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
+            }
+
+            err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
+            if err != nil {
+                return err
+            }
+
+            pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
+            if err != nil {
+                return err
+            }
+
+            for _, blockHash := range pointAndItsAnticone {
+                err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
+                if err != nil {
+                    return err
+                }
+            }
+
+            err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
+            if err != nil {
+                return err
+            }
+
+            log.Debugf("Sent pruning point and its anticone to %s", peer)
+            return nil
+        }()
+        if err != nil {
+            return err
+        }
+    }
+}
+
+func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
+    blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
+    if err != nil {
+        return err
+    }
+
+    err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
+    if err != nil {
+        return err
+    }
+
+    runtime.GC()
+
+    return nil
+}
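
The new flow above guards a heavy operation with a package-level isBusy flag and CompareAndSwap, so concurrent peers cannot run it simultaneously; the losing peer gets a non-banning protocol error instead of queueing up. A minimal sketch of that guard in isolation, simplified from the added file:

package main

import (
    "fmt"
    "sync/atomic"
)

var isBusy uint32

// tryHeavyWork runs work only if no other goroutine holds the guard,
// mirroring the CompareAndSwap/Store pair in the flow above.
func tryHeavyWork(work func()) error {
    if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
        return fmt.Errorf("node is busy with another request")
    }
    defer atomic.StoreUint32(&isBusy, 0)
    work()
    return nil
}

func main() {
    _ = tryHeavyWork(func() { fmt.Println("first request served") })
    // The deferred Store released the guard, so a later request succeeds too.
    _ = tryHeavyWork(func() { fmt.Println("second request served") })
}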
@@ -3,15 +3,12 @@ package blockrelay
 import (
     "github.com/kaspanet/kaspad/app/appmessage"
     "github.com/kaspanet/kaspad/app/protocol/common"
-    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
     peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
-    "github.com/kaspanet/kaspad/domain/consensus/model"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
     "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
     "github.com/kaspanet/kaspad/infrastructure/config"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "github.com/pkg/errors"
|
||||
type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlockTemplate() error
|
||||
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||
SharedRequestedBlocks() *SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
||||
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
|
||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
UnsetIBDRunning()
|
||||
IsRecoverableError(err error) bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type invRelayBlock struct {
|
||||
Hash *externalapi.DomainHash
|
||||
IsOrphanRoot bool
|
||||
}
|
||||
|
||||
type handleRelayInvsFlow struct {
|
||||
RelayInvsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
invsQueue []invRelayBlock
|
||||
invsQueue []*appmessage.MsgInvRelayBlock
|
||||
}
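// Editor's sketch, assembled from operations that appear later in this diff:
// invsQueue is a plain FIFO slice. Relayed invs go to the back, while orphan
// roots are prepended so they are resolved before the invs that depend on them:
//
//    inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:] // pop front (readInv)
//    flow.invsQueue = append(flow.invsQueue, message)            // push back (readMsgBlock)
//    flow.invsQueue = append(invMessages, flow.invsQueue...)     // push front (AddOrphanRootsToQueue)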

// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
@@ -62,12 +53,9 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
        incomingRoute: incomingRoute,
        outgoingRoute: outgoingRoute,
        peer:          peer,
        invsQueue:     make([]invRelayBlock, 0),
        invsQueue:     make([]*appmessage.MsgInvRelayBlock, 0),
    }
    err := flow.start()
    // Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
    close(peer.IBDRequestChannel())
    return err
    return flow.start()
}

func (flow *handleRelayInvsFlow) start() error {
@@ -93,18 +81,7 @@ func (flow *handleRelayInvsFlow) start() error {
            continue
        }

        isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
        if err != nil {
            return err
        }

        if flow.IsOrphan(inv.Hash) {
            if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
                log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
                    "to the recent pruning point before normal operation can resume.", inv.Hash)
                continue
            }

            log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
            err := flow.AddOrphanRootsToQueue(inv.Hash)
            if err != nil {
@@ -113,16 +90,10 @@ func (flow *handleRelayInvsFlow) start() error {
            continue
        }

        // Block relay is disabled if the node is already during IBD AND considered out of sync
        // Block relay is disabled during IBD
        if flow.IsIBDRunning() {
            isNearlySynced, err := flow.IsNearlySynced()
            if err != nil {
                return err
            }
            if !isNearlySynced {
                log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
                continue
            }
            log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
            continue
        }

        log.Debugf("Requesting block %s", inv.Hash)
@@ -140,41 +111,8 @@ func (flow *handleRelayInvsFlow) start() error {
            return err
        }

        if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
            log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
            continue
        }

        // Note we do not apply the heuristic below if inv was queued as an orphan root, since
        // that means the process started by a proper and relevant relay block
        if !inv.IsOrphanRoot {
            // Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
            virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
            if err != nil {
                return err
            }
            if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
                mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
                if err != nil {
                    return err
                }
                // Since `BlueWork` respects topology, this condition means that the relay
                // block is not in the future of virtual's merge depth root, and thus cannot be merged unless
                // other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
                if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
                    log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
                        inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
                    continue
                }
            }
        }
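        // Editor's note (assumption: BlueWork() returns a *big.Int, as it does
        // elsewhere in kaspad): Cmp yields -1, 0 or +1, so "Cmp(...) <= 0" above
        // means the relayed block has accumulated no more blue work than the
        // merge depth root and therefore cannot end up in virtual's merged past;
        // skipping it is safe.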

        log.Debugf("Processing block %s", inv.Hash)
        oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
        if err != nil {
            return err
        }
        missingParents, virtualChangeSet, err := flow.processBlock(block)
        missingParents, blockInsertionResult, err := flow.processBlock(block)
        if err != nil {
            if errors.Is(err, ruleerrors.ErrPrunedBlock) {
                log.Infof("Ignoring pruned block %s", inv.Hash)
@@ -196,44 +134,13 @@ func (flow *handleRelayInvsFlow) start() error {
            continue
        }

        oldVirtualParents := hashset.New()
        for _, parent := range oldVirtualInfo.ParentHashes {
            oldVirtualParents.Add(parent)
        }

        newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
        log.Debugf("Relaying block %s", inv.Hash)
        err = flow.relayBlock(block)
        if err != nil {
            return err
        }

        virtualHasNewParents := false
        for _, parent := range newVirtualInfo.ParentHashes {
            if oldVirtualParents.Contains(parent) {
                continue
            }
            virtualHasNewParents = true
            block, err := flow.Domain().Consensus().GetBlock(parent)
            if err != nil {
                return err
            }
            blockHash := consensushashing.BlockHash(block)
            log.Debugf("Relaying block %s", blockHash)
            err = flow.relayBlock(block)
            if err != nil {
                return err
            }
        }

        if virtualHasNewParents {
            log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
            err = flow.OnNewBlockTemplate()
            if err != nil {
                return err
            }
        }

        log.Infof("Accepted block %s via relay", inv.Hash)
        err = flow.OnNewBlock(block, virtualChangeSet)
        err = flow.OnNewBlock(block, blockInsertionResult)
        if err != nil {
            return err
        }
@@ -249,35 +156,35 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
    return nil
}

func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
    if len(flow.invsQueue) > 0 {
        var inv invRelayBlock
        var inv *appmessage.MsgInvRelayBlock
        inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
        return inv, nil
    }

    msg, err := flow.incomingRoute.Dequeue()
    if err != nil {
        return invRelayBlock{}, err
        return nil, err
    }

    msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
    inv, ok := msg.(*appmessage.MsgInvRelayBlock)
    if !ok {
        return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
        return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
            "expecting an inv message", msg.Command())
    }
    return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
    return inv, nil
}

func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
    exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
    exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
    if exists {
        return nil, true, nil
    }

    // In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
    // clean from any pending blocks.
    defer flow.SharedRequestedBlocks().Remove(requestHash)
    defer flow.SharedRequestedBlocks().remove(requestHash)

    getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
    err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
@@ -311,7 +218,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,

        switch message := message.(type) {
        case *appmessage.MsgInvRelayBlock:
            flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
            flow.invsQueue = append(flow.invsQueue, message)
        case *appmessage.MsgBlock:
            return message, nil
        default:
@@ -320,9 +227,9 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
    }
}

func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
    blockHash := consensushashing.BlockHash(block)
    virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
    blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
    if err != nil {
        if !errors.As(err, &ruleerrors.RuleError{}) {
            return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
@@ -332,13 +239,10 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
        if errors.As(err, missingParentsError) {
            return missingParentsError.MissingParentHashes, nil, nil
        }
        // A duplicate block should not appear to the user as a warning and is already reported in the calling function
        if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
            log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
        }
        log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
        return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
    }
    return nil, virtualChangeSet, nil
    return nil, blockInsertionResult, nil
}
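// Editor's note: the hunk above elides the declaration that errors.As targets.
// A plausible shape, based on how ruleerrors is used in this file (assumption,
// not shown in the diff):
//
//    missingParentsError := &ruleerrors.ErrMissingParents{}
//    if errors.As(err, missingParentsError) {
//        return missingParentsError.MissingParentHashes, nil, nil
//    }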

func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@@ -361,19 +265,6 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
        return err
    }
    if isBlockInOrphanResolutionRange {
        if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
            isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
            if err != nil {
                return err
            }

            if isGenesisVirtualSelectedParent {
                log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
                    "to the recent pruning point before normal operation can resume.", blockHash)
                return nil
            }
        }

        log.Debugf("Block %s is within orphan resolution range. "+
            "Adding it to the orphan set", blockHash)
        flow.AddOrphan(block)
@@ -384,28 +275,7 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
    // Start IBD unless we already are in IBD
    log.Debugf("Block %s is out of orphan resolution range. "+
        "Attempting to start IBD against it.", blockHash)

    // Send the block to IBD flow via the IBDRequestChannel.
    // Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
    select {
    case flow.peer.IBDRequestChannel() <- block:
    default:
    }
    return nil
}

func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
    virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
    if err != nil {
        return false, err
    }

    return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
    parents := block.Header.DirectParents()
    return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
    return flow.runIBDIfNotRunning(block)
}

// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
@@ -446,16 +316,12 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
            "probably happened because it was randomly evicted immediately after it was added.", orphan)
    }

    if len(orphanRoots) == 0 {
        // In some rare cases we get here when there are no orphan roots already
        return nil
    }
    log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))

    invMessages := make([]invRelayBlock, len(orphanRoots))
    invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
    for i, root := range orphanRoots {
        log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
        invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
        invMessages[i] = appmessage.NewMsgInvBlock(root)
    }

    flow.invsQueue = append(invMessages, flow.invsQueue...)
@@ -10,9 +10,7 @@ import (
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99
const ibdBatchSize = router.DefaultMaxMessages

// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
@@ -44,16 +42,7 @@ func (flow *handleRequestHeadersFlow) start() error {
    if err != nil {
        return err
    }
    log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

    isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
    if err != nil {
        return err
    }
    if !isLowSelectedAncestorOfHigh {
        return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
            lowHash, highHash)
    }
    log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

    for !lowHash.Equal(highHash) {
        log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)

492
app/protocol/flows/blockrelay/ibd.go
Normal file
@@ -0,0 +1,492 @@
package blockrelay

import (
    "time"

    "github.com/kaspanet/kaspad/infrastructure/logger"

    "github.com/kaspanet/kaspad/domain/consensus/model"

    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/common"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
    "github.com/pkg/errors"
)

func (flow *handleRelayInvsFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
    wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
    if !wasIBDNotRunning {
        log.Debugf("IBD is already running")
        return nil
    }

    isFinishedSuccessfully := false
    defer func() {
        flow.UnsetIBDRunning()
        flow.logIBDFinished(isFinishedSuccessfully)
    }()

    highHash := consensushashing.BlockHash(block)
    log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
    log.Debugf("Syncing blocks up to %s", highHash)
    log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
    highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
    if err != nil {
        return err
    }
    log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

    shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
    if err != nil {
        return err
    }

    if !shouldSync {
        return nil
    }

    if shouldDownloadHeadersProof {
        log.Infof("Starting IBD with headers proof")
        err := flow.ibdWithHeadersProof(highHash)
        if err != nil {
            return err
        }
    } else {
        err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
        if err != nil {
            return err
        }
    }

    err = flow.syncMissingBlockBodies(highHash)
    if err != nil {
        return err
    }

    log.Debugf("Finished syncing blocks up to %s", highHash)
    isFinishedSuccessfully = true
    return nil
}
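// Editor's sketch (an assumption about the flow context internals, not code
// from this diff): TrySetIBDRunning/UnsetIBDRunning behave like a mutex-guarded
// single-owner flag, which is what makes runIBDIfNotRunning race-free when
// several peers relay blocks at once:
//
//    type ibdState struct {
//        mu      sync.Mutex
//        ibdPeer *peerpkg.Peer
//    }
//
//    func (s *ibdState) TrySetIBDRunning(peer *peerpkg.Peer) bool {
//        s.mu.Lock()
//        defer s.mu.Unlock()
//        if s.ibdPeer != nil {
//            return false // IBD is already owned by another peer
//        }
//        s.ibdPeer = peer
//        return true
//    }
//
//    func (s *ibdState) UnsetIBDRunning() {
//        s.mu.Lock()
//        defer s.mu.Unlock()
//        s.ibdPeer = nil
//    }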

func (flow *handleRelayInvsFlow) logIBDFinished(isFinishedSuccessfully bool) {
    successString := "successfully"
    if !isFinishedSuccessfully {
        successString = "(interrupted)"
    }
    log.Infof("IBD finished %s", successString)
}

// findHighestSharedBlock attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
    targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {

    log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
    blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
    if err != nil {
        return nil, false, err
    }

    for {
        highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
        if err != nil {
            return nil, false, err
        }
        if !highestHashFound {
            return nil, false, nil
        }
        highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
        if err != nil {
            return nil, false, err
        }

        if highestHashIndex == 0 ||
            // If the block locator contains only two adjacent chain blocks, the
            // syncer will always find the same highest chain block, so to avoid
            // an endless loop, we explicitly stop the loop in such situation.
            (len(blockLocator) == 2 && highestHashIndex == 1) {

            return highestHash, true, nil
        }

        locatorHashAboveHighestHash := highestHash
        if highestHashIndex > 0 {
            locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
        }

        blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
        if err != nil {
            return nil, false, err
        }
    }
}
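// Editor's note: each iteration above narrows the shared-block search the way
// a binary search over the selected chain would. The locator samples the chain
// with exponentially growing gaps; the peer answers with the highest entry it
// knows, and the next locator spans only the gap between that entry and the
// one just above it, so the number of round trips is logarithmic in the chain
// length.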

func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
    log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
    blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
    if err != nil {
        if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
            return nil, err
        }
        log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
            "restarting with full block locator")
        blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
        if err != nil {
            return nil, err
        }
    }

    return blockLocator, nil
}

func (flow *handleRelayInvsFlow) findHighestHashIndex(
    highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {

    highestHashIndex := 0
    highestHashIndexFound := false
    for i, blockLocatorHash := range blockLocator {
        if highestHash.Equal(blockLocatorHash) {
            highestHashIndex = i
            highestHashIndexFound = true
            break
        }
    }
    if !highestHashIndexFound {
        return 0, protocolerrors.Errorf(true, "highest hash %s "+
            "returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
    }
    log.Debugf("The index of the highest hash in the original "+
        "blockLocator sent to %s is %d", flow.peer, highestHashIndex)

    return highestHashIndex, nil
}

// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) fetchHighestHash(
    targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {

    ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
    err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
    if err != nil {
        return nil, false, err
    }
    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
    if err != nil {
        return nil, false, err
    }
    switch message := message.(type) {
    case *appmessage.MsgIBDBlockLocatorHighestHash:
        highestHash := message.HighestHash
        log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

        return highestHash, true, nil
    case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
        log.Debugf("Peer %s does not know any block within our blockLocator. "+
            "This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
        return nil, false, nil
    default:
        return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
            "expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
    }
}

func (flow *handleRelayInvsFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
    highHash *externalapi.DomainHash) error {

    log.Infof("Downloading headers from %s", flow.peer)

    err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
    if err != nil {
        return err
    }

    // Keep a short queue of BlockHeadersMessages so that there's
    // never a moment when the node is not validating and inserting
    // headers
    blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
    errChan := make(chan error)
    spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
        for {
            blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
            if err != nil {
                errChan <- err
                return
            }
            if doneIBD {
                close(blockHeadersMessageChan)
                return
            }

            blockHeadersMessageChan <- blockHeadersMessage

            err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
            if err != nil {
                errChan <- err
                return
            }
        }
    })

    for {
        select {
        case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
            if !ok {
                // If the highHash has not been received, the peer is misbehaving
                highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
                if err != nil {
                    return err
                }
                if !highHashBlockInfo.Exists {
                    return protocolerrors.Errorf(true, "did not receive "+
                        "highHash block %s from peer %s during block download", highHash, flow.peer)
                }
                return nil
            }
            for _, header := range ibdBlocksMessage.BlockHeaders {
                err = flow.processHeader(consensus, header)
                if err != nil {
                    return err
                }
            }
        case err := <-errChan:
            return err
        }
    }
}
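// Editor's sketch (the generic pattern behind the function above; produce and
// consume are illustrative stand-ins, not kaspad APIs): the two-slot channel
// forms a small pipeline, so the wire read of batch N+1 overlaps validation of
// batch N:
//
//    batches := make(chan *appmessage.BlockHeadersMessage, 2)
//    errs := make(chan error, 1)
//    go func() { // producer: network reads
//        for {
//            batch, done, err := produce()
//            if err != nil {
//                errs <- err
//                return
//            }
//            if done {
//                close(batches)
//                return
//            }
//            batches <- batch // blocks only when two batches are already pending
//        }
//    }()
//    for { // consumer: header validation
//        select {
//        case batch, ok := <-batches:
//            if !ok {
//                return nil
//            }
//            if err := consume(batch); err != nil {
//                return err
//            }
//        case err := <-errs:
//            return err
//        }
//    }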

func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
    peerSelectedTipHash *externalapi.DomainHash) error {

    msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
    return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}

func (flow *handleRelayInvsFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
    if err != nil {
        return nil, false, err
    }
    switch message := message.(type) {
    case *appmessage.BlockHeadersMessage:
        return message, false, nil
    case *appmessage.MsgDoneHeaders:
        return nil, true, nil
    default:
        return nil, false,
            protocolerrors.Errorf(true, "received unexpected message type. "+
                "expected: %s or %s, got: %s",
                appmessage.CmdBlockHeaders,
                appmessage.CmdDoneHeaders,
                message.Command())
    }
}

func (flow *handleRelayInvsFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
    header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
    block := &externalapi.DomainBlock{
        Header:       header,
        Transactions: nil,
    }

    blockHash := consensushashing.BlockHash(block)
    blockInfo, err := consensus.GetBlockInfo(blockHash)
    if err != nil {
        return err
    }
    if blockInfo.Exists {
        log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
        return nil
    }
    _, err = consensus.ValidateAndInsertBlock(block, false)
    if err != nil {
        if !errors.As(err, &ruleerrors.RuleError{}) {
            return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
        }

        if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
            log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
        } else {
            log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
            return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
        }
    }

    return nil
}

func (flow *handleRelayInvsFlow) validatePruningPointFutureHeaderTimestamps() error {
    headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
    if err != nil {
        return err
    }
    headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
    if err != nil {
        return err
    }
    headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

    currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
    if err != nil {
        return err
    }
    currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
    if err != nil {
        return err
    }
    currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

    if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
        return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
            "tip is smaller than the current selected tip")
    }

    minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
    if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
        return protocolerrors.Errorf(false, "difference between the timestamps of "+
            "the current pruning point and the candidate pruning point is too small. Aborting IBD...")
    }
    return nil
}
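// Editor's note: (10 * time.Minute).Milliseconds() is 600,000ms, so the check
// above only lets IBD with headers proof replace the active consensus when the
// candidate selected tip's timestamp is at least ten minutes ahead of the
// current one; anything closer is rejected as not worth the switch.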

func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
    consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

    onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
    defer onEnd()

    receivedChunkCount := 0
    receivedUTXOCount := 0
    for {
        message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
        if err != nil {
            return false, err
        }

        switch message := message.(type) {
        case *appmessage.MsgPruningPointUTXOSetChunk:
            receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
            domainOutpointAndUTXOEntryPairs :=
                appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

            err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
            if err != nil {
                return false, err
            }

            receivedChunkCount++
            if receivedChunkCount%ibdBatchSize == 0 {
                log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
                    receivedChunkCount, receivedUTXOCount)

                requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
                err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
                if err != nil {
                    return false, err
                }
            }

        case *appmessage.MsgDonePruningPointUTXOSetChunks:
            log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
            return true, nil

        case *appmessage.MsgUnexpectedPruningPoint:
            log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
                "is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
            return false, nil

        default:
            return false, protocolerrors.Errorf(true, "received unexpected message type. "+
                "expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
                appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
            )
        }
    }
}
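// Editor's note: the syncer pauses after each batch of chunks and waits for
// MsgRequestNextPruningPointUTXOSetChunk, so the periodic re-request above is
// what keeps the stream flowing. One side of this diff ties ibdBatchSize to
// router.DefaultMaxMessages (see the constant change earlier), which keeps a
// full batch within the route's message-queue capacity.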

func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
    hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
    if err != nil {
        return err
    }
    if len(hashes) == 0 {
        // Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
        // In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
        // In these cases - GetMissingBlockBodyHashes would return an empty array.
        log.Debugf("No missing block body hashes found.")
        return nil
    }

    for offset := 0; offset < len(hashes); offset += ibdBatchSize {
        var hashesToRequest []*externalapi.DomainHash
        if offset+ibdBatchSize < len(hashes) {
            hashesToRequest = hashes[offset : offset+ibdBatchSize]
        } else {
            hashesToRequest = hashes[offset:]
        }

        err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
        if err != nil {
            return err
        }

        for _, expectedHash := range hashesToRequest {
            message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
            if err != nil {
                return err
            }

            msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
            if !ok {
                return protocolerrors.Errorf(true, "received unexpected message type. "+
                    "expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
            }

            block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
            blockHash := consensushashing.BlockHash(block)
            if !expectedHash.Equal(blockHash) {
                return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
            }

            err = flow.banIfBlockIsHeaderOnly(block)
            if err != nil {
                return err
            }

            blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
            if err != nil {
                if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
                    log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
                    continue
                }
                return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
            }
            err = flow.OnNewBlock(block, blockInsertionResult)
            if err != nil {
                return err
            }
        }
    }

    return flow.Domain().Consensus().ResolveVirtual()
}
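// Editor's sketch (an equivalent form of the windowing loop above, shown for
// clarity; behavior is unchanged):
//
//    for offset := 0; offset < len(hashes); offset += ibdBatchSize {
//        end := offset + ibdBatchSize
//        if end > len(hashes) {
//            end = len(hashes)
//        }
//        hashesToRequest := hashes[offset:end]
//        // request the batch, then read exactly len(hashesToRequest) replies
//    }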

// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
// IBD. Inv messages are expected to arrive at any given moment, but should be
// ignored while we're in IBD
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
    for {
        message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
        if err != nil {
            return nil, err
        }
        if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
            return message, nil
        }
    }
}
@@ -9,23 +9,20 @@ import (
    "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
    "github.com/pkg/errors"
    "time"
)

func (flow *handleIBDFlow) ibdWithHeadersProof(
    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
    err := flow.Domain().InitStagingConsensusWithoutGenesis()
func (flow *handleRelayInvsFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
    err := flow.Domain().InitStagingConsensus()
    if err != nil {
        return err
    }

    err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
    err = flow.downloadHeadersAndPruningUTXOSet(highHash)
    if err != nil {
        if !flow.IsRecoverableError(err) {
            return err
        }

        log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
        deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
        if deleteStagingConsensusErr != nil {
            return deleteStagingConsensusErr
@@ -34,44 +31,19 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(
        return err
    }

    log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
        "Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
    err = flow.Domain().CommitStagingConsensus()
    if err != nil {
        return err
    }

    err = flow.OnPruningPointUTXOSetOverride()
    if err != nil {
        return err
    }

    return nil
}

func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
    relayBlock *externalapi.DomainBlock,
    highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
func (flow *handleRelayInvsFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
    highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {

    var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
    if highestKnownSyncerChainHash != nil {
        highestSharedBlockFound = true
        pruningPoint, err := flow.Domain().Consensus().PruningPoint()
        if err != nil {
            return false, false, err
        }

        isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
            pruningPoint, highestKnownSyncerChainHash)
        if err != nil {
            return false, false, err
        }
    }
    // Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
    // we might have here info which is relevant to finality conflict decisions. This should be taken into
    // account when we improve this aspect.
    if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
        hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
    if !highestSharedBlockFound {
        hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
        if err != nil {
            return false, false, err
        }
@@ -86,7 +58,7 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
    return false, true, nil
}

func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
func (flow *handleRelayInvsFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
    headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
    if err != nil {
        return false, err
@@ -97,20 +69,20 @@ func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruni
        return false, err
    }

    if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
    if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
        return false, nil
    }

    return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
    return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}

func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
func (flow *handleRelayInvsFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
    log.Infof("Downloading the pruning point proof from %s", flow.peer)
    err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
    if err != nil {
        return nil, err
    }
    message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
    if err != nil {
        return nil, err
    }
@@ -136,10 +108,7 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
    return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}

func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
    highBlockDAAScore uint64) error {

func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash) error {
    proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
    if err != nil {
        return err
@@ -156,20 +125,19 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
        return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
    }

    err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
        syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
    err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash)
    if err != nil {
        return err
    }

    log.Infof("Headers downloaded from peer %s", flow.peer)

    relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
    highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
    if err != nil {
        return err
    }

    if !relayBlockInfo.Exists {
    if !highHashInfo.Exists {
        return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
    }

@@ -191,7 +159,7 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
    return nil
}

func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
func (flow *handleRelayInvsFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
    log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
    err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
    if err != nil {
@@ -203,17 +171,6 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
        return err
    }

    message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
    if err != nil {
        return err
    }

    msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
    if !ok {
        return protocolerrors.Errorf(true, "received unexpected message type. "+
            "expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
    }

    pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
    if err != nil {
        return err
@@ -227,13 +184,12 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
        return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
    }

    err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
    err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData)
    if err != nil {
        return err
    }

    i := 0
    for ; ; i++ {
    for {
        blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
        if err != nil {
            return err
@@ -243,55 +199,31 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
            break
        }

        err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
        err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData)
        if err != nil {
            return err
        }

        // We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
        // the pruning point outside the loop so we use i+2 instead of i+1.
        if (i+2)%ibdBatchSize == 0 {
            log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
            err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
            if err != nil {
                return err
            }
        }
    }

    log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
    log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
    return nil
}

func (flow *handleIBDFlow) processBlockWithTrustedData(
    consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
func (flow *handleRelayInvsFlow) processBlockWithTrustedData(
    consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedData) error {

    blockWithTrustedData := &externalapi.BlockWithTrustedData{
        Block:        appmessage.MsgBlockToDomainBlock(block.Block),
        DAAWindow:    make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
        GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
    }

    for _, index := range block.DAAWindowIndices {
        blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
    }

    for _, index := range block.GHOSTDAGDataIndices {
        blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
    }

    _, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
    _, err := consensus.ValidateAndInsertBlockWithTrustedData(appmessage.BlockWithTrustedDataToDomainBlockWithTrustedData(block), false)
    return err
}
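// Editor's note: the MsgBlockWithTrustedDataV4 side of this hunk no longer
// ships a full DAA window and GHOSTDAG data per block. Instead each block
// carries indices into a MsgTrustedData message sent once per sync, so data
// shared by many anticone blocks crosses the wire a single time; the loops
// above re-inflate it per block before insertion.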

func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
    message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
func (flow *handleRelayInvsFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedData, bool, error) {
    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
    if err != nil {
        return nil, false, err
    }

    switch downCastedMessage := message.(type) {
    case *appmessage.MsgBlockWithTrustedDataV4:
    case *appmessage.MsgBlockWithTrustedData:
        return downCastedMessage, false, nil
    case *appmessage.MsgDoneBlocksWithTrustedData:
        return nil, true, nil
@@ -305,8 +237,8 @@ func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWi
    }
}

func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
    message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
func (flow *handleRelayInvsFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
    message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
    if err != nil {
        return nil, err
    }
@@ -321,7 +253,7 @@ func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints,
    return msgPruningPoints, nil
}

func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
func (flow *handleRelayInvsFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
    currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
    if err != nil {
        return err
@@ -365,7 +297,7 @@ func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *ext
    return nil
}

func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
    pruningPoint *externalapi.DomainHash) (bool, error) {

    log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
@@ -381,7 +313,6 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
    log.Info("Fetching the pruning point UTXO set")
    isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
    if err != nil {
        log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
        return false, err
    }

@@ -394,7 +325,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
    return true, nil
}

func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
    defer func() {
        err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
        if err != nil {
@@ -424,5 +355,10 @@ func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus,
        return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
    }

    err = flow.OnPruningPointUTXOSetOverride()
    if err != nil {
        return false, err
    }

    return true, nil
}
@@ -1,4 +1,4 @@
package flowcontext
package blockrelay

import (
    "sync"
@@ -13,15 +13,13 @@ type SharedRequestedBlocks struct {
    sync.Mutex
}

// Remove removes a block from the set.
func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) {
func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
    s.Lock()
    defer s.Unlock()
    delete(s.blocks, *hash)
}

// RemoveSet removes a set of blocks from the set.
func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) {
func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
    s.Lock()
    defer s.Unlock()
    for hash := range blockHashes {
@@ -29,8 +27,7 @@ func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash
    }
}

// AddIfNotExists adds a block to the set if it doesn't exist yet.
func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) {
func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
    s.Lock()
    defer s.Unlock()
    _, ok := s.blocks[*hash]
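    // Editor's note: the hunk is truncated here. Based on the map-as-set
    // pattern above, a plausible completion of addIfNotExists is (assumption,
    // not shown in the diff):
    //
    //    if ok {
    //        return true // already requested by another flow
    //    }
    //    s.blocks[*hash] = struct{}{}
    //    return false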
|
||||
@@ -28,7 +28,7 @@ type HandleHandshakeContext interface {
|
||||
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
|
||||
}
|
||||
|
||||
// HandleHandshake sets up the new_handshake protocol - It sends a version message and waits for an incoming
|
||||
// HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming
|
||||
// version message, as well as a verack for the sent version
|
||||
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
|
||||
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
||||
@@ -98,7 +98,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
|
||||
}
|
||||
|
||||
// Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
||||
// Therefore we implement a separate handleError for new_handshake
|
||||
// Therefore we implement a separate handleError for handshake
|
||||
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
|
||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -18,9 +17,7 @@ var (
|
||||
|
||||
// minAcceptableProtocolVersion is the lowest protocol version that a
|
||||
// connected peer may support.
|
||||
minAcceptableProtocolVersion = uint32(5)
|
||||
|
||||
maxAcceptableProtocolVersion = uint32(5)
|
||||
minAcceptableProtocolVersion = appmessage.ProtocolVersion
|
||||
)
|
||||
|
||||
type receiveVersionFlow struct {
|
||||
@@ -100,12 +97,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
||||
return nil, protocolerrors.New(false, "incompatible subnetworks")
|
||||
}
|
||||
|
||||
if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion {
|
||||
return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion)
|
||||
}
|
||||
|
||||
maxProtocolVersion := flow.Config().ProtocolVersion
|
||||
flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion)
|
||||
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -57,18 +56,15 @@ func (flow *sendVersionFlow) start() error {
|
||||
// Version message.
|
||||
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
|
||||
subnetworkID := flow.Config().SubnetworkID
|
||||
if flow.Config().ProtocolVersion < minAcceptableProtocolVersion {
|
||||
return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion)
|
||||
}
|
||||
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
|
||||
flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion)
|
||||
flow.Config().ActiveNetParams.Name, subnetworkID)
|
||||
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
|
||||
|
||||
// Advertise the services flag
|
||||
msg.Services = defaultServices
|
||||
|
||||
// Advertise our max supported protocol version.
|
||||
msg.ProtocolVersion = flow.Config().ProtocolVersion
|
||||
msg.ProtocolVersion = appmessage.ProtocolVersion
|
||||
|
||||
// Advertise if inv messages for transactions are desired.
|
||||
msg.DisableRelayTx = flow.Config().BlocksOnly
|
||||
|
||||
@@ -2,8 +2,6 @@ package ping
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -63,9 +61,6 @@ func (flow *sendPingsFlow) start() error {
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
if errors.Is(err, router.ErrTimeout) {
|
||||
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
pongMessage := message.(*appmessage.MsgPong)
@@ -1,9 +0,0 @@
package ready

import (
    "github.com/kaspanet/kaspad/infrastructure/logger"
    "github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,56 +0,0 @@
package ready

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/common"
    "sync/atomic"

    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "github.com/pkg/errors"
)

// HandleReady notifies the other peer that this peer is ready for messages, and waits for the other peer
// to send a ready message before starting to run the flows.
func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
    peer *peerpkg.Peer,
) error {

    log.Debugf("Sending ready message to %s", peer)

    isStopping := uint32(0)
    err := outgoingRoute.Enqueue(appmessage.NewMsgReady())
    if err != nil {
        return handleError(err, "HandleReady", &isStopping)
    }

    _, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
    if err != nil {
        return handleError(err, "HandleReady", &isStopping)
    }

    log.Debugf("Got ready message from %s", peer)

    return nil
}

// Ready is different from other flows, since it should forward router.ErrRouteClosed to errChan.
// Therefore we implement a separate handleError for 'ready'.
func handleError(err error, flowName string, isStopping *uint32) error {
    if errors.Is(err, routerpkg.ErrRouteClosed) {
        if atomic.AddUint32(isStopping, 1) == 1 {
            return err
        }
        return nil
    }

    if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
        log.Errorf("Ready protocol error from %s: %s", flowName, err)
        if atomic.AddUint32(isStopping, 1) == 1 {
            return err
        }
        return nil
    }
    panic(err)
}
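The deleted flow above implements a simple symmetric handshake: send our ready message, then block (with a timeout) until the peer's arrives. A toy version, with a channel-backed queue standing in for the router's Route type:

```go
package main

import (
    "errors"
    "fmt"
    "time"
)

// route is a toy bounded queue standing in for the router's Route type.
type route struct{ ch chan string }

func (r *route) enqueue(msg string) error {
    select {
    case r.ch <- msg:
        return nil
    default:
        return errors.New("route is full")
    }
}

func (r *route) dequeueWithTimeout(d time.Duration) (string, error) {
    select {
    case msg := <-r.ch:
        return msg, nil
    case <-time.After(d):
        return "", errors.New("timeout")
    }
}

// handleReady sends our "ready" and blocks until the peer's "ready" arrives,
// mirroring the handshake in the deleted flow above.
func handleReady(incoming, outgoing *route) error {
    if err := outgoing.enqueue("ready"); err != nil {
        return err
    }
    _, err := incoming.dequeueWithTimeout(time.Second)
    return err
}

func main() {
    shared := &route{ch: make(chan string, 1)}
    // Loop the outgoing route back into the incoming one so the handshake
    // completes locally.
    if err := handleReady(shared, shared); err != nil {
        panic(err)
    }
    fmt.Println("both sides ready, flows may start")
}
```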
@@ -1,11 +1,11 @@
package testing

import (
    "github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
    "testing"
    "time"

    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
    "github.com/kaspanet/kaspad/domain/consensus"
    "github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
@@ -3,7 +3,6 @@ package transactionrelay
import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/common"
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -19,10 +18,9 @@ import (
type TransactionsRelayContext interface {
    NetAdapter() *netadapter.NetAdapter
    Domain() domain.Domain
    SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
    SharedRequestedTransactions() *SharedRequestedTransactions
    OnTransactionAddedToMempool()
    EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
    IsNearlySynced() (bool, error)
}

type handleRelayedTransactionsFlow struct {
@@ -50,15 +48,6 @@ func (flow *handleRelayedTransactionsFlow) start() error {
        return err
    }

    isNearlySynced, err := flow.IsNearlySynced()
    if err != nil {
        return err
    }
    // Transaction relay is disabled if the node is out of sync and thus not mining
    if !isNearlySynced {
        continue
    }

    requestedIDs, err := flow.requestInvTransactions(inv)
    if err != nil {
        return err
@@ -79,7 +68,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
    if flow.isKnownTransaction(txID) {
        continue
    }
    exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
    exists := flow.SharedRequestedTransactions().addIfNotExists(txID)
    if exists {
        continue
    }
@@ -93,7 +82,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
    msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
    err = flow.outgoingRoute.Enqueue(msgGetTransactions)
    if err != nil {
        flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
        flow.SharedRequestedTransactions().removeMany(idsToRequest)
        return nil, err
    }
    return idsToRequest, nil
@@ -162,7 +151,7 @@ func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
    // In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
    // clean from any pending transactions.
    defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
    defer flow.SharedRequestedTransactions().removeMany(requestedTransactions)
    for _, expectedID := range requestedTransactions {
        msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
        if err != nil {
@@ -2,11 +2,10 @@ package transactionrelay_test

import (
    "errors"
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
    "strings"
    "testing"

    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus"
@@ -25,7 +24,7 @@ import (
type mocTransactionsRelayContext struct {
    netAdapter *netadapter.NetAdapter
    domain domain.Domain
    sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
    sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
}

func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
@@ -36,7 +35,7 @@ func (m *mocTransactionsRelayContext) Domain() domain.Domain {
    return m.domain
}

func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
    return m.sharedRequestedTransactions
}

@@ -47,10 +46,6 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}

func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
    return true, nil
}

// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
@@ -65,7 +60,7 @@ func TestHandleRelayedTransactionsNotFound(t *testing.T) {
    }
    defer teardown(false)

    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
    adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
    if err != nil {
        t.Fatalf("Failed to create a NetAdapter: %v", err)
@@ -158,7 +153,7 @@ func TestOnClosedIncomingRoute(t *testing.T) {
    }
    defer teardown(false)

    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
    adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
    if err != nil {
        t.Fatalf("Failed to create a NetAdapter: %v", err)
@@ -1,11 +1,10 @@
package transactionrelay_test

import (
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
    "testing"

    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -32,7 +31,7 @@ func TestHandleRequestedTransactionsNotFound(t *testing.T) {
    }
    defer teardown(false)

    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
    adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
    if err != nil {
        t.Fatalf("Failed to create a NetAdapter: %v", err)
@@ -1,4 +1,4 @@
package flowcontext
package transactionrelay

import (
    "sync"
@@ -13,15 +13,13 @@ type SharedRequestedTransactions struct {
    sync.Mutex
}

// Remove removes a transaction from the set.
func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) {
func (s *SharedRequestedTransactions) remove(txID *externalapi.DomainTransactionID) {
    s.Lock()
    defer s.Unlock()
    delete(s.transactions, *txID)
}

// RemoveMany removes a set of transactions from the set.
func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) {
func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
    s.Lock()
    defer s.Unlock()
    for _, txID := range txIDs {
@@ -29,8 +27,7 @@ func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTran
    }
}

// AddIfNotExists adds a transaction to the set if it doesn't exist yet.
func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
func (s *SharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
    s.Lock()
    defer s.Unlock()
    _, ok := s.transactions[*txID]
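The hunks above unexport the set's methods as it moves into the transactionrelay package. A self-contained sketch of the underlying structure, a map guarded by an embedded mutex (types simplified for illustration):

```go
package main

import (
    "fmt"
    "sync"
)

// txID stands in for externalapi.DomainTransactionID (a fixed-size hash in kaspad).
type txID [32]byte

// requestedSet is a minimal version of the SharedRequestedTransactions type
// shown above: a set guarded by an embedded mutex.
type requestedSet struct {
    transactions map[txID]struct{}
    sync.Mutex
}

func newRequestedSet() *requestedSet {
    return &requestedSet{transactions: make(map[txID]struct{})}
}

// addIfNotExists reports whether the ID was already present, adding it if not.
// Doing the check and the insert under one lock keeps the operation atomic.
func (s *requestedSet) addIfNotExists(id txID) (exists bool) {
    s.Lock()
    defer s.Unlock()
    if _, ok := s.transactions[id]; ok {
        return true
    }
    s.transactions[id] = struct{}{}
    return false
}

func (s *requestedSet) removeMany(ids []txID) {
    s.Lock()
    defer s.Unlock()
    for _, id := range ids {
        delete(s.transactions, id)
    }
}

func main() {
    s := newRequestedSet()
    id := txID{1}
    fmt.Println(s.addIfNotExists(id)) // false: newly added
    fmt.Println(s.addIfNotExists(id)) // true: already requested
    s.removeMany([]txID{id})
    fmt.Println(s.addIfNotExists(id)) // false again
}
```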
@@ -1,16 +0,0 @@
package blockrelay

import (
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "testing"
)

func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
    // The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
    // to set it to `router.DefaultMaxMessages`, to avoid confusion and human error.
    // Nonetheless, we must enforce that it stays strictly below `router.DefaultMaxMessages`.
    if ibdBatchSize >= router.DefaultMaxMessages {
        t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
            ibdBatchSize, router.DefaultMaxMessages)
    }
}
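The invariant this test enforces can be seen with a plain buffered channel; the constants below are illustrative values, not the real ones from the blockrelay and router packages:

```go
package main

import "fmt"

// Assumed illustrative values; the real constants live in the blockrelay and
// router packages.
const (
    defaultMaxMessages = 256 // route queue capacity
    ibdBatchSize       = 99  // must stay strictly below the capacity
)

func main() {
    // A route is essentially a bounded queue. As long as the sender pushes
    // at most ibdBatchSize messages before waiting for the next request,
    // the queue can never overflow.
    route := make(chan int, defaultMaxMessages)
    for i := 0; i < ibdBatchSize; i++ {
        route <- i // never blocks, since ibdBatchSize < defaultMaxMessages
    }
    fmt.Printf("enqueued %d/%d messages without blocking\n", len(route), cap(route))
}
```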
@@ -1,85 +0,0 @@
package blockrelay

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus/model"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "github.com/pkg/errors"
)

// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestIBDChainBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
    Domain() domain.Domain
}

type handleRequestIBDChainBlockLocatorFlow struct {
    RequestIBDChainBlockLocatorContext
    incomingRoute, outgoingRoute *router.Route
}

// HandleRequestIBDChainBlockLocator handles getIBDChainBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
    outgoingRoute *router.Route) error {

    flow := &handleRequestIBDChainBlockLocatorFlow{
        RequestIBDChainBlockLocatorContext: context,
        incomingRoute:                      incomingRoute,
        outgoingRoute:                      outgoingRoute,
    }
    return flow.start()
}

func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
    for {
        highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
        if err != nil {
            return err
        }
        log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)

        var locator externalapi.BlockLocator
        if highHash == nil || lowHash == nil {
            locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
        } else {
            locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
            if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
                // The chain has been modified, signal it by sending an empty locator
                locator, err = externalapi.BlockLocator{}, nil
            }
        }

        if err != nil {
            log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
            return protocolerrors.Errorf(true, "couldn't build a block "+
                "locator between %s and %s", lowHash, highHash)
        }

        err = flow.sendIBDChainBlockLocator(locator)
        if err != nil {
            return err
        }
    }
}

func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {

    message, err := flow.incomingRoute.Dequeue()
    if err != nil {
        return nil, nil, err
    }
    msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)

    return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}

func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
    msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
    err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
    if err != nil {
        return err
    }
    return nil
}
@@ -1,158 +0,0 @@
package blockrelay

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "sync/atomic"
)

// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
    Domain() domain.Domain
    Config() *config.Config
}

var isBusy uint32

// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
    outgoingRoute *router.Route, peer *peerpkg.Peer) error {

    for {
        err := func() error {
            _, err := incomingRoute.Dequeue()
            if err != nil {
                return err
            }

            if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
                return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
            }
            defer atomic.StoreUint32(&isBusy, 0)

            log.Debugf("Got request for pruning point and its anticone from %s", peer)

            pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
            if err != nil {
                return err
            }

            msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
            for i, header := range pruningPointHeaders {
                msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
            }

            err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
            if err != nil {
                return err
            }

            pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
            if err != nil {
                return err
            }

            windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
            daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
            daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
            trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)

            ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
            ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
            trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
            for _, blockHash := range pointAndItsAnticone {
                blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
                if err != nil {
                    return err
                }

                trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
                for i, daaBlockHash := range blockDAAWindowHashes {
                    index, exists := daaWindowHashesToIndex[*daaBlockHash]
                    if !exists {
                        trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
                        if err != nil {
                            return err
                        }
                        daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
                        index = len(daaWindowBlocks) - 1
                        daaWindowHashesToIndex[*daaBlockHash] = index
                    }

                    trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
                }

                ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
                if err != nil {
                    return err
                }

                trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
                for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
                    index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
                    if !exists {
                        data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
                        if err != nil {
                            return err
                        }
                        ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
                            Hash:         ghostdagDataBlockHash,
                            GHOSTDAGData: data,
                        })
                        index = len(ghostdagData) - 1
                        ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
                    }

                    trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
                }
            }

            err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
            if err != nil {
                return err
            }

            for i, blockHash := range pointAndItsAnticone {
                block, err := context.Domain().Consensus().GetBlock(blockHash)
                if err != nil {
                    return err
                }

                err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
                if err != nil {
                    return err
                }

                if (i+1)%ibdBatchSize == 0 {
                    // No timeout here, as we don't care if the syncee takes its time computing,
                    // since it only blocks this dedicated flow
                    message, err := incomingRoute.Dequeue()
                    if err != nil {
                        return err
                    }
                    if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
                        return protocolerrors.Errorf(true, "received unexpected message type. "+
                            "expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
                    }
                }
            }

            err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
            if err != nil {
                return err
            }

            log.Debugf("Sent pruning point and its anticone to %s", peer)
            return nil
        }()
        if err != nil {
            return err
        }
    }
}
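The deleted flow above rejects concurrent requests with an atomic compare-and-swap on a package-level flag rather than queueing them behind a mutex. The guard in isolation, as a runnable sketch:

```go
package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var isBusy uint32

// serveExpensiveRequest turns away concurrent callers with a cheap atomic
// compare-and-swap instead of making them wait, matching the busy-guard in
// the deleted flow above.
func serveExpensiveRequest() error {
    if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
        return errors.New("node is busy with another request")
    }
    defer atomic.StoreUint32(&isBusy, 0)

    // ... expensive work (building the pruning point anticone) would go here ...
    return nil
}

func main() {
    fmt.Println(serveExpensiveRequest()) // nil: the guard was free
}
```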
@@ -1,95 +0,0 @@
package blockrelay

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/peer"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "sort"
)

// RequestAnticoneContext is the interface for the context needed for the HandleRequestAnticone flow.
type RequestAnticoneContext interface {
    Domain() domain.Domain
    Config() *config.Config
}

type handleRequestAnticoneFlow struct {
    RequestAnticoneContext
    incomingRoute, outgoingRoute *router.Route
    peer *peer.Peer
}

// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
    outgoingRoute *router.Route, peer *peer.Peer) error {

    flow := &handleRequestAnticoneFlow{
        RequestAnticoneContext: context,
        incomingRoute:          incomingRoute,
        outgoingRoute:          outgoingRoute,
        peer:                   peer,
    }
    return flow.start()
}

func (flow *handleRequestAnticoneFlow) start() error {
    for {
        blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
        if err != nil {
            return err
        }
        log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
        log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)

        // GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
        // intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
        // we relay blocks only if they enter virtual's mergeset. We add 2 for a small error margin.
        blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
            flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
        if err != nil {
            return protocolerrors.Wrap(true, err, "Failed querying anticone")
        }
        log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)

        blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
        for i, blockHash := range blockHashes {
            blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
            if err != nil {
                return err
            }
            blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
        }

        // We sort the headers in bottom-up topological order before sending
        sort.Slice(blockHeaders, func(i, j int) bool {
            return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
        })

        blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
        err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
        if err != nil {
            return err
        }

        err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
        if err != nil {
            return err
        }
    }
}

func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
    contextHash *externalapi.DomainHash, err error) {

    message, err := incomingRoute.Dequeue()
    if err != nil {
        return nil, nil, err
    }
    msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)

    return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}
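Sorting by ascending blue work, as in the deleted flow above, yields a bottom-up topological order, because a block always accumulates strictly more blue work than any block in its past. A runnable sketch with a pared-down header type:

```go
package main

import (
    "fmt"
    "math/big"
    "sort"
)

// header is a simplified stand-in for appmessage.MsgBlockHeader; only the
// cumulative blue work matters for this ordering.
type header struct {
    name     string
    blueWork *big.Int
}

func main() {
    headers := []header{
        {"c", big.NewInt(300)},
        {"a", big.NewInt(100)},
        {"b", big.NewInt(200)},
    }

    // Ascending blue work is a valid bottom-up topological order: parents
    // always precede their descendants.
    sort.Slice(headers, func(i, j int) bool {
        return headers[i].blueWork.Cmp(headers[j].blueWork) < 0
    })

    for _, h := range headers {
        fmt.Println(h.name) // a, b, c
    }
}
```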
@@ -1,728 +0,0 @@
package blockrelay

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/common"
    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/logger"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "github.com/pkg/errors"
    "time"
)

// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
    Domain() domain.Domain
    Config() *config.Config
    OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
    OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
    OnNewBlockTemplate() error
    OnPruningPointUTXOSetOverride() error
    IsIBDRunning() bool
    TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
    UnsetIBDRunning()
    IsRecoverableError(err error) bool
}

type handleIBDFlow struct {
    IBDContext
    incomingRoute, outgoingRoute *router.Route
    peer *peerpkg.Peer
}

// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
    peer *peerpkg.Peer) error {

    flow := &handleIBDFlow{
        IBDContext:    context,
        incomingRoute: incomingRoute,
        outgoingRoute: outgoingRoute,
        peer:          peer,
    }
    return flow.start()
}

func (flow *handleIBDFlow) start() error {
    for {
        // Wait for IBD requests triggered by other flows
        block, ok := <-flow.peer.IBDRequestChannel()
        if !ok {
            return nil
        }
        err := flow.runIBDIfNotRunning(block)
        if err != nil {
            return err
        }
    }
}

func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
    wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
    if !wasIBDNotRunning {
        log.Debugf("IBD is already running")
        return nil
    }

    isFinishedSuccessfully := false
    defer func() {
        flow.UnsetIBDRunning()
        flow.logIBDFinished(isFinishedSuccessfully)
    }()

    relayBlockHash := consensushashing.BlockHash(block)

    log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
    log.Debugf("Syncing blocks up to %s", relayBlockHash)
    log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)

    syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
    if err != nil {
        return err
    }

    shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
        block, highestKnownSyncerChainHash)
    if err != nil {
        return err
    }

    if !shouldSync {
        return nil
    }

    if shouldDownloadHeadersProof {
        log.Infof("Starting IBD with headers proof")
        err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
        if err != nil {
            return err
        }
    } else {
        if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
            isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
            if err != nil {
                return err
            }

            if isGenesisVirtualSelectedParent {
                log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
                    "to the recent pruning point before normal operation can resume.", relayBlockHash)
                return nil
            }
        }

        err = flow.syncPruningPointFutureHeaders(
            flow.Domain().Consensus(),
            syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
        if err != nil {
            return err
        }
    }

    // We start by syncing missing bodies over the syncer selected chain
    err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
    if err != nil {
        return err
    }
    relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
    if err != nil {
        return err
    }
    // Relay block might be in the anticone of syncer selected tip, thus
    // check its chain for missing bodies as well.
    // Note: this operation can be slightly optimized to avoid the full chain search since relay block
    // is in syncer virtual mergeset which has bounded size.
    if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
        err = flow.syncMissingBlockBodies(relayBlockHash)
        if err != nil {
            return err
        }
    }

    log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
    isFinishedSuccessfully = true
    return nil
}

func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
    /*
        Algorithm:
            Request full selected chain block locator from syncer
            Find the highest block which we know
            Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
    */

    // Empty hashes indicate that the full chain is queried
    locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
    if err != nil {
        return nil, nil, err
    }
    if len(locatorHashes) == 0 {
        return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
            "to contain at least one element")
    }
    log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
        len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
    syncerHeaderSelectedTipHash := locatorHashes[0]
    var highestKnownSyncerChainHash *externalapi.DomainHash
    chainNegotiationRestartCounter := 0
    chainNegotiationZoomCounts := 0
    initialLocatorLen := len(locatorHashes)
    for {
        var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
        for _, syncerChainHash := range locatorHashes {
            info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
            if err != nil {
                return nil, nil, err
            }
            if info.Exists {
                currentHighestKnownSyncerChainHash = syncerChainHash
                break
            }
            lowestUnknownSyncerChainHash = syncerChainHash
        }
        // No unknown blocks, break. Note this can only happen in the first iteration
        if lowestUnknownSyncerChainHash == nil {
            highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
            break
        }
        // No shared block, break
        if currentHighestKnownSyncerChainHash == nil {
            highestKnownSyncerChainHash = nil
            break
        }
        // No point in zooming further
        if len(locatorHashes) == 1 {
            highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
            break
        }
        // Zoom in
        locatorHashes, err = flow.getSyncerChainBlockLocator(
            lowestUnknownSyncerChainHash,
            currentHighestKnownSyncerChainHash, time.Second*10)
        if err != nil {
            return nil, nil, err
        }
        if len(locatorHashes) > 0 {
            if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
                !locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
                return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
                    "hashes to match the locator bounds")
            }

            chainNegotiationZoomCounts++
            log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
                chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

            if len(locatorHashes) == 2 {
                // We found our search target
                highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
                break
            }

            if chainNegotiationZoomCounts > initialLocatorLen*2 {
                // Since the zoom-in always queries two consecutive entries in the previous locator, it is
                // expected to decrease in size at least every two iterations
                return nil, nil, protocolerrors.Errorf(true,
                    "IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
                    chainNegotiationZoomCounts, initialLocatorLen)
            }

        } else { // Empty locator signals a restart due to chain changes
            chainNegotiationZoomCounts = 0
            chainNegotiationRestartCounter++
            if chainNegotiationRestartCounter > 32 {
                return nil, nil, protocolerrors.Errorf(false,
                    "IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
            }
            log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)

            // An empty locator signals that the syncer chain was modified and no longer contains one of
            // the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
            locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
            if err != nil {
                return nil, nil, err
            }
            if len(locatorHashes) == 0 {
                return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
                    "to contain at least one element")
            }
            log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
                chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

            initialLocatorLen = len(locatorHashes)
            // Reset syncer's header selected tip
            syncerHeaderSelectedTipHash = locatorHashes[0]
        }
    }

    log.Debugf("Found highest known syncer chain block %s from peer %s",
        highestKnownSyncerChainHash, flow.peer)

    return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}
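The negotiation deleted above repeatedly zooms in on the range between the lowest unknown and the highest known chain block until the locator shrinks to two entries. A toy model of that search over an integer chain; this is a simplification for illustration only, not the real locator format:

```go
package main

import "fmt"

// locator samples the chain between low and high with exponentially growing
// gaps, ordered from high to low, like a block locator.
func locator(chain []int, low, high int) []int {
    out := []int{}
    step := 1
    for i := high; i > low; i -= step {
        out = append(out, chain[i])
        step *= 2
    }
    return append(out, chain[low])
}

func main() {
    chain := make([]int, 1024)
    known := map[int]bool{}
    for i := range chain {
        chain[i] = i
        known[i] = i <= 700 // the syncee knows the chain only up to block 700
    }

    low, high := 0, len(chain)-1
    for {
        loc := locator(chain, low, high)
        // Scan from highest to lowest: the first known hash bounds the
        // search from below, its predecessor in the locator from above.
        for i, h := range loc {
            if known[h] {
                if i == 0 || len(loc) == 2 {
                    fmt.Println("highest shared block:", h) // 700
                    return
                }
                low, high = h, loc[i-1]
                break
            }
        }
    }
}
```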
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
    virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
    if err != nil {
        return false, err
    }

    return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
    successString := "successfully"
    if !isFinishedSuccessfully {
        successString = "(interrupted)"
    }
    log.Infof("IBD with peer %s finished %s", flow.peer, successString)
}

func (flow *handleIBDFlow) getSyncerChainBlockLocator(
    highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {

    requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
    err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
    if err != nil {
        return nil, err
    }
    message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
    if err != nil {
        return nil, err
    }
    switch message := message.(type) {
    case *appmessage.MsgIBDChainBlockLocator:
        if len(message.BlockLocatorHashes) > 64 {
            return nil, protocolerrors.Errorf(true,
                "Got block locator of size %d>64 while expecting locator to have size "+
                    "which is logarithmic in DAG size (which should never exceed 2^64)",
                len(message.BlockLocatorHashes))
        }
        return message.BlockLocatorHashes, nil
    default:
        return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
            "expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
    }
}

func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
    syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
    highBlockDAAScoreHint uint64) error {

    log.Infof("Downloading headers from %s", flow.peer)

    if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
        // No need to get syncer selected tip headers, so sync relay past and return
        return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
    }

    err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
    if err != nil {
        return err
    }

    highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
    if err != nil {
        return err
    }
    progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")

    // Keep a short queue of BlockHeadersMessages so that there's
    // never a moment when the node is not validating and inserting
    // headers
    blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
    errChan := make(chan error)
    spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
        for {
            blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
            if err != nil {
                errChan <- err
                return
            }
            if doneIBD {
                close(blockHeadersMessageChan)
                return
            }
            if len(blockHeadersMessage.BlockHeaders) == 0 {
                // The syncer should have sent a done message if the search completed, and not an empty list
                errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
                return
            }

            blockHeadersMessageChan <- blockHeadersMessage

            err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
            if err != nil {
                errChan <- err
                return
            }
        }
    })

    for {
        select {
        case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
            if !ok {
                return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
            }
            for _, header := range ibdBlocksMessage.BlockHeaders {
                err = flow.processHeader(consensus, header)
                if err != nil {
                    return err
                }
            }

            lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
            progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
        case err := <-errChan:
            return err
        }
    }
}
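syncPruningPointFutureHeaders above pipelines the download: a spawned goroutine keeps pulling header chunks off the wire while the main loop validates them, with a two-element buffer between the stages. The pattern reduced to its core:

```go
package main

import "fmt"

// One goroutine keeps receiving chunks while the main loop processes them,
// so processing never waits for the wire (and vice versa, up to the small
// buffer), as in the flow above.
func main() {
    chunks := make(chan []int, 2) // short queue, as in the original flow
    errs := make(chan error, 1)

    go func() {
        defer close(chunks)
        for i := 0; i < 5; i++ {
            chunks <- []int{i * 2, i*2 + 1} // stand-in for receiveHeaders()
        }
    }()

    processed := 0
    for {
        select {
        case chunk, ok := <-chunks:
            if !ok {
                fmt.Println("done, processed", processed, "headers")
                return
            }
            processed += len(chunk) // stand-in for processHeader()
        case err := <-errs:
            panic(err)
        }
    }
}
```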
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
    // Finished downloading syncer selected tip blocks,
    // check if we already have the triggering relayBlockHash
    relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
    if err != nil {
        return err
    }
    if !relayBlockInfo.Exists {
        // Send a special header request for the selected tip anticone. This is expected to
        // be a small set, as it is bounded to the size of virtual's mergeset.
        err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
        if err != nil {
            return err
        }
        anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
        if err != nil {
            return err
        }
        if anticoneDone {
            return protocolerrors.Errorf(true,
                "Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
                relayBlockHash, syncerHeaderSelectedTipHash)
        }
        _, anticoneDone, err = flow.receiveHeaders()
        if err != nil {
            return err
        }
        if !anticoneDone {
            return protocolerrors.Errorf(true,
                "Expected only one anticone header chunk for past(%s) cap anticone(%s)",
                relayBlockHash, syncerHeaderSelectedTipHash)
        }
        for _, header := range anticoneHeadersMessage.BlockHeaders {
            err = flow.processHeader(consensus, header)
            if err != nil {
                return err
            }
        }
    }

    // If the relayBlockHash has still not been received, the peer is misbehaving
    relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
    if err != nil {
        return err
    }
    if !relayBlockInfo.Exists {
        return protocolerrors.Errorf(true, "did not receive "+
            "relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
    }
    return nil
}

func (flow *handleIBDFlow) sendRequestAnticone(
    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {

    msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
    return flow.outgoingRoute.Enqueue(msgRequestAnticone)
}

func (flow *handleIBDFlow) sendRequestHeaders(
    highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {

    msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
    return flow.outgoingRoute.Enqueue(msgRequestHeaders)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
    message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
    if err != nil {
        return nil, false, err
    }
    switch message := message.(type) {
    case *appmessage.BlockHeadersMessage:
        return message, false, nil
    case *appmessage.MsgDoneHeaders:
        return nil, true, nil
    default:
        return nil, false,
            protocolerrors.Errorf(true, "received unexpected message type. "+
                "expected: %s or %s, got: %s",
                appmessage.CmdBlockHeaders,
                appmessage.CmdDoneHeaders,
                message.Command())
    }
}

func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
    header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
    block := &externalapi.DomainBlock{
        Header:       header,
        Transactions: nil,
    }

    blockHash := consensushashing.BlockHash(block)
    blockInfo, err := consensus.GetBlockInfo(blockHash)
    if err != nil {
        return err
    }
    if blockInfo.Exists {
        log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
        return nil
    }
    _, err = consensus.ValidateAndInsertBlock(block, false)
    if err != nil {
        if !errors.As(err, &ruleerrors.RuleError{}) {
            return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
        }

        if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
            log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
        } else {
            log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
            return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
        }
    }

    return nil
}

func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
    headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
    if err != nil {
        return err
    }
    headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
    if err != nil {
        return err
    }
    headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

    currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
    if err != nil {
        return err
    }
    currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
    if err != nil {
        return err
    }
    currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

    if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
        return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
            "tip is smaller than that of the current selected tip")
    }

    minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
    if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
        return protocolerrors.Errorf(false, "difference between the timestamps of "+
            "the current pruning point and the candidate pruning point is too small. Aborting IBD...")
    }
    return nil
}

func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
    consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

    onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
    defer onEnd()

    receivedChunkCount := 0
    receivedUTXOCount := 0
    for {
        message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
        if err != nil {
            return false, err
        }

        switch message := message.(type) {
        case *appmessage.MsgPruningPointUTXOSetChunk:
            receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
            domainOutpointAndUTXOEntryPairs :=
                appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

            err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
            if err != nil {
                return false, err
            }

            receivedChunkCount++
            if receivedChunkCount%ibdBatchSize == 0 {
                log.Infof("Received %d UTXO set chunks so far, totaling %d UTXOs",
                    receivedChunkCount, receivedUTXOCount)

                requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
                err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
                if err != nil {
                    return false, err
                }
            }

        case *appmessage.MsgDonePruningPointUTXOSetChunks:
            log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
            return true, nil

        case *appmessage.MsgUnexpectedPruningPoint:
            log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
                "is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
            return false, nil

        default:
            return false, protocolerrors.Errorf(true, "received unexpected message type. "+
                "expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
                appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
            )
        }
    }
}
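receiveAndInsertPruningPointUTXOSet above drives the download with a dequeue-and-type-switch loop that distinguishes chunk, done, and unexpected-pruning-point messages. A compact, runnable model of that dispatch, with toy message types in place of the appmessage ones:

```go
package main

import "fmt"

// Minimal message types standing in for the appmessage variants handled in
// the switch above.
type (
    utxoChunk              struct{ utxos int }
    doneChunks             struct{}
    unexpectedPruningPoint struct{}
)

// consumeUTXOSet drains messages until a terminal one arrives, mirroring the
// dequeue-and-type-switch loop of the original flow.
func consumeUTXOSet(messages <-chan interface{}) (received int, ok bool, err error) {
    for message := range messages {
        switch m := message.(type) {
        case utxoChunk:
            received += m.utxos
        case doneChunks:
            return received, true, nil
        case unexpectedPruningPoint:
            return received, false, nil // peer moved on; not an error
        default:
            return received, false, fmt.Errorf("unexpected message type %T", m)
        }
    }
    return received, false, fmt.Errorf("route closed")
}

func main() {
    messages := make(chan interface{}, 3)
    messages <- utxoChunk{utxos: 1000}
    messages <- utxoChunk{utxos: 500}
    messages <- doneChunks{}
    fmt.Println(consumeUTXOSet(messages)) // 1500 true <nil>
}
```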
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
    hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
    if err != nil {
        return err
    }
    if len(hashes) == 0 {
        // Blocks can be inserted into the DAG during IBD if they were requested before IBD started.
        // In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
        // In these cases, GetMissingBlockBodyHashes returns an empty array.
        log.Debugf("No missing block body hashes found.")
        return nil
    }

    lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
    if err != nil {
        return err
    }
    highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
    if err != nil {
        return err
    }
    progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
    highestProcessedDAAScore := lowBlockHeader.DAAScore()

    for offset := 0; offset < len(hashes); offset += ibdBatchSize {
        var hashesToRequest []*externalapi.DomainHash
        if offset+ibdBatchSize < len(hashes) {
            hashesToRequest = hashes[offset : offset+ibdBatchSize]
        } else {
            hashesToRequest = hashes[offset:]
        }

        err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
        if err != nil {
            return err
        }

        for _, expectedHash := range hashesToRequest {
            message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
            if err != nil {
                return err
            }

            msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
            if !ok {
                return protocolerrors.Errorf(true, "received unexpected message type. "+
                    "expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
            }

            block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
            blockHash := consensushashing.BlockHash(block)
            if !expectedHash.Equal(blockHash) {
                return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
            }

            err = flow.banIfBlockIsHeaderOnly(block)
            if err != nil {
                return err
            }

            virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
            if err != nil {
                if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
                    log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
                    continue
                }
                return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
            }
            err = flow.OnNewBlock(block, virtualChangeSet)
            if err != nil {
                return err
            }

            highestProcessedDAAScore = block.Header.DAAScore()
        }

        progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
    }

    return flow.resolveVirtual(highestProcessedDAAScore)
}
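The batching idiom in syncMissingBlockBodies, reduced to a helper: request a bounded window of hashes at a time so the peer's route queue can never overflow. A runnable sketch:

```go
package main

import "fmt"

// batches splits items into windows of at most batchSize, clamping the last
// window to the slice length, exactly as the offset loop above does.
func batches(items []string, batchSize int) [][]string {
    var out [][]string
    for offset := 0; offset < len(items); offset += batchSize {
        end := offset + batchSize
        if end > len(items) {
            end = len(items)
        }
        out = append(out, items[offset:end])
    }
    return out
}

func main() {
    hashes := []string{"h1", "h2", "h3", "h4", "h5"}
    for i, b := range batches(hashes, 2) {
        fmt.Printf("request %d: %v\n", i, b) // [h1 h2], [h3 h4], [h5]
    }
}
```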
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
    if len(block.Transactions) == 0 {
        return protocolerrors.Errorf(true, "sent header-only block %s where a block with a body was expected",
            consensushashing.BlockHash(block))
    }

    return nil
}

func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
    virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
    if err != nil {
        return err
    }

    for i := 0; ; i++ {
        if i%10 == 0 {
            virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
            if err != nil {
                return err
            }
            var percents int
            if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
                percents = 100
            } else {
                percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
            }
            log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
        }
        virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
        if err != nil {
            return err
        }

        err = flow.OnVirtualChange(virtualChangeSet)
        if err != nil {
            return err
        }

        if isCompletelyResolved {
            log.Infof("Resolved virtual")
            err = flow.OnNewBlockTemplate()
            if err != nil {
                return err
            }
            return nil
        }
    }
}
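One subtlety in resolveVirtual's progress estimate: the DAA scores are unsigned, so a subtraction can never be negative, and the 100%-progress guard has to compare the operands directly rather than test `target - start` against zero. A sketch:

```go
package main

import "fmt"

// progressPercent mirrors the estimate in resolveVirtual. With unsigned
// scores, `target - start` would underflow rather than go negative, so the
// guard compares the operands directly.
func progressPercent(start, current, target uint64) int {
    if target <= start {
        return 100
    }
    return int(float64(current-start) / float64(target-start) * 100)
}

func main() {
    fmt.Println(progressPercent(1000, 1500, 2000)) // 50
    fmt.Println(progressPercent(2000, 2000, 1500)) // 100: target already passed
}
```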
@@ -1,45 +0,0 @@
package blockrelay

type ibdProgressReporter struct {
    lowDAAScore                 uint64
    highDAAScore                uint64
    objectName                  string
    totalDAAScoreDifference     uint64
    lastReportedProgressPercent int
    processed                   int
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
    if highDAAScore <= lowDAAScore {
        // Avoid a zero or negative diff
        highDAAScore = lowDAAScore + 1
    }
    return &ibdProgressReporter{
        lowDAAScore:                 lowDAAScore,
        highDAAScore:                highDAAScore,
        objectName:                  objectName,
        totalDAAScoreDifference:     highDAAScore - lowDAAScore,
        lastReportedProgressPercent: 0,
        processed:                   0,
    }
}

func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
    ipr.processed += processedDelta

    // Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
    if highestProcessedDAAScore > ipr.highDAAScore {
        ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
        ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
    }
    relativeDAAScore := uint64(0)
    if highestProcessedDAAScore > ipr.lowDAAScore {
        // Avoid a negative diff
        relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
    }
    progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
    if progressPercent > ipr.lastReportedProgressPercent {
        log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
        ipr.lastReportedProgressPercent = progressPercent
    }
}
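A usage sketch of the reporter's only-log-on-increase behavior, with fmt standing in for the node's logger and the same stretch-the-hint trick for the high score:

```go
package main

import "fmt"

// reporter demonstrates the only-log-on-increase behavior of
// ibdProgressReporter, simplified for illustration.
type reporter struct {
    low, high   uint64
    lastPercent int
    processed   int
}

func (r *reporter) report(delta int, highest uint64) {
    r.processed += delta
    if highest > r.high {
        r.high = highest + 1 // the high score was only a hint; stretch it
    }
    var relative uint64
    if highest > r.low {
        relative = highest - r.low
    }
    percent := int(float64(relative) / float64(r.high-r.low) * 100)
    if percent > r.lastPercent { // skip reports that wouldn't advance
        fmt.Printf("processed %d blocks (%d%%)\n", r.processed, percent)
        r.lastPercent = percent
    }
}

func main() {
    r := &reporter{low: 0, high: 100}
    r.report(10, 25) // prints 25%
    r.report(5, 25)  // same percent: silent
    r.report(30, 80) // prints 80%
}
```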
|
||||
@@ -1,209 +0,0 @@
package v5

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type protocolManager interface {
	RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
		isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
		messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	Context() *flowcontext.FlowContext
}

// Register is used in order to register all the protocol flows to the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
	flows = registerAddressFlows(m, router, isStopping, errChan)
	flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
	flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)

	return flows
}

func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
			isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
			}),

		m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
			appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
			appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
			appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
			appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
			appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
			appmessage.CmdPruningPointProof,
			appmessage.CmdTrustedData,
			appmessage.CmdIBDChainBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBD(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestHeaders", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
				appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestAnticone", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandlePruningPointProofRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
			[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
		m.RegisterFlow("HandleRequestTransactions", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}

func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("HandleRejects", router,
			[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}
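The file deleted above is essentially a table of (flow name, message commands, initialize function) registrations made through the protocolManager interface. As a hedged illustration of that pattern, here is one hypothetical extra entry; the flow name and handler body are invented for the example and are not part of kaspad:

// Illustrative only: "HandleExample" and its handler are invented.
m.RegisterFlow("HandleExample", router,
	[]appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
	func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
		message, err := incomingRoute.Dequeue() // blocks until a message arrives
		if err != nil {
			return err
		}
		log.Infof("received %s from peer %s", message.Command(), peer)
		return nil
	},
)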
@@ -2,7 +2,6 @@ package protocol

import (
	"fmt"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"sync"
	"sync/atomic"

@@ -72,16 +71,11 @@ func (m *Manager) AddBlock(block *externalapi.DomainBlock) error {
	return m.context.AddBlock(block)
}

// Context returns the manager's flow context
func (m *Manager) Context() *flowcontext.FlowContext {
	return m.context
}

func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
	flowsWaitGroup.Add(len(flows))
	for _, flow := range flows {
		executeFunc := flow.ExecuteFunc // extract to new variable so that it's not overwritten
		spawn(fmt.Sprintf("flow-%s", flow.Name), func() {
		executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
		spawn(fmt.Sprintf("flow-%s", flow.name), func() {
			executeFunc(peer)
			flowsWaitGroup.Done()
		})
@@ -90,21 +84,11 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
	return <-errChan
}

// SetOnVirtualChange sets the onVirtualChangeHandler handler
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
	m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
}

// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
	m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
}

// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
	m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
}

// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
func (m *Manager) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler flowcontext.OnPruningPointUTXOSetOverrideHandler) {
	m.context.SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler)
@@ -118,7 +102,7 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (m *Manager) ShouldMine() (bool, error) {
	return m.context.IsNearlySynced()
	return m.context.ShouldMine()
}

// IsIBDRunning returns true if IBD is currently marked as running
@@ -13,6 +13,10 @@ import (
	"github.com/kaspanet/kaspad/util/mstime"
)

// maxProtocolVersion is the maximum protocol version this kaspad node supports
const maxProtocolVersion = 3

// Peer holds data about a peer.
type Peer struct {
	connection *netadapter.NetConnection
@@ -31,8 +35,6 @@ type Peer struct {
	lastPingNonce    uint64        // The nonce of the last ping we sent
	lastPingTime     time.Time     // Time we sent last ping
	lastPingDuration time.Duration // Time for last ping to return

	ibdRequestChannel chan *externalapi.DomainBlock // A channel used to communicate IBD requests between flows
}

// New returns a new Peer
@@ -40,7 +42,6 @@ func New(connection *netadapter.NetConnection) *Peer {
	return &Peer{
		connection:        connection,
		connectionStarted: time.Now(),
		ibdRequestChannel: make(chan *externalapi.DomainBlock),
	}
}

@@ -75,11 +76,6 @@ func (p *Peer) AdvertisedProtocolVersion() uint32 {
	return p.advertisedProtocolVerion
}

// ProtocolVersion returns the protocol version which is used when communicating with the peer.
func (p *Peer) ProtocolVersion() uint32 {
	return p.protocolVersion
}

// TimeConnected returns the time since the connection to this peer was started.
func (p *Peer) TimeConnected() time.Duration {
	return time.Since(p.connectionStarted)
@@ -91,7 +87,7 @@ func (p *Peer) IsOutbound() bool {
}

// UpdateFieldsFromMsgVersion updates the peer with the data from the version message.
func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion, maxProtocolVersion uint32) {
func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion) {
	// Negotiate the protocol version.
	p.advertisedProtocolVerion = msg.ProtocolVersion
	p.protocolVersion = mathUtil.MinUint32(maxProtocolVersion, p.advertisedProtocolVerion)
@@ -146,8 +142,3 @@ func (p *Peer) LastPingDuration() time.Duration {

	return p.lastPingDuration
}

// IBDRequestChannel returns the channel used in order to communicate an IBD request between peer flows
func (p *Peer) IBDRequestChannel() chan *externalapi.DomainBlock {
	return p.ibdRequestChannel
}
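The negotiation in UpdateFieldsFromMsgVersion reduces to taking the minimum of our maximum supported version and the peer's advertised version. A minimal standalone sketch of that rule, where minUint32 stands in for mathUtil.MinUint32:

package main

import "fmt"

func minUint32(a, b uint32) uint32 {
	if a < b {
		return a
	}
	return b
}

func main() {
	const maxProtocolVersion = 3 // as in the constant added above
	for _, advertised := range []uint32{1, 3, 5} {
		negotiated := minUint32(maxProtocolVersion, advertised)
		fmt.Printf("advertised %d -> negotiated %d\n", advertised, negotiated)
	}
}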
@@ -1,14 +1,16 @@
package protocol

import (
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flows/ready"
	"github.com/kaspanet/kaspad/app/protocol/flows/v5"
	"sync"
	"sync/atomic"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
	"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
	"github.com/kaspanet/kaspad/app/protocol/flows/handshake"
	"github.com/kaspanet/kaspad/app/protocol/flows/ping"
	"github.com/kaspanet/kaspad/app/protocol/flows/rejects"
	"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
@@ -18,6 +20,14 @@ import (
	"github.com/pkg/errors"
)

type flowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error
type flowExecuteFunc func(peer *peerpkg.Peer)

type flow struct {
	name        string
	executeFunc flowExecuteFunc
}

func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *netadapter.NetConnection) {
	// isStopping flag is raised the moment that the connection associated with this router is disconnected
	// errChan is used by the flow goroutines to return to runFlows when an error occurs.
@@ -25,7 +35,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
	isStopping := uint32(0)
	errChan := make(chan error)

	receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
	flows := m.registerFlows(router, errChan, &isStopping)
	receiveVersionRoute, sendVersionRoute := registerHandshakeRoutes(router)

	// After flows were registered - spawn a new thread that will wait for connection to finish initializing
	// and start receiving messages
@@ -73,21 +84,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
	}
	defer m.context.RemoveFromPeers(peer)

	var flows []*common.Flow
	log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
	switch peer.ProtocolVersion() {
	case 5:
		flows = v5.Register(m, router, errChan, &isStopping)
	default:
		panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
	}

	err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
	if err != nil {
		m.handleError(err, netConnection, router.OutgoingRoute())
		return
	}

	removeHandshakeRoutes(router)

	flowsWaitGroup := &sync.WaitGroup{}
@@ -134,9 +130,167 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
	panic(err)
}

// RegisterFlow registers a flow to the given router.
func (m *Manager) RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
	errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
func (m *Manager) registerFlows(router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*flow) {
	flows = m.registerAddressFlows(router, isStopping, errChan)
	flows = append(flows, m.registerBlockRelayFlows(router, isStopping, errChan)...)
	flows = append(flows, m.registerPingFlows(router, isStopping, errChan)...)
	flows = append(flows, m.registerTransactionRelayFlow(router, isStopping, errChan)...)
	flows = append(flows, m.registerRejectsFlow(router, isStopping, errChan)...)

	return flows
}

func (m *Manager) registerAddressFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
	outgoingRoute := router.OutgoingRoute()

	return []*flow{
		m.registerFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.SendAddresses(m.context, incomingRoute, outgoingRoute)
			},
		),

		m.registerOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.ReceiveAddresses(m.context, incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
	outgoingRoute := router.OutgoingRoute()

	return []*flow{
		m.registerOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
			isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.SendVirtualSelectedParentInv(m.context, outgoingRoute, peer)
			}),

		m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
			appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
			appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
			appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedData,
			appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
			appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
			appmessage.CmdPruningPointProof,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayInvs(m.context, incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.registerFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayBlockRequests(m.context, incomingRoute, outgoingRoute, peer)
			},
		),

		m.registerFlow("HandleRequestBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestBlockLocator(m.context, incomingRoute, outgoingRoute)
			},
		),

		m.registerFlow("HandleRequestHeaders", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestHeaders(m.context, incomingRoute, outgoingRoute, peer)
			},
		),

		m.registerFlow("HandleIBDBlockRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockRequests(m.context, incomingRoute, outgoingRoute)
			},
		),

		m.registerFlow("HandleRequestPruningPointUTXOSet", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
				appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestPruningPointUTXOSet(m.context, incomingRoute, outgoingRoute)
			},
		),

		m.registerFlow("HandlePruningPointAndItsAnticoneRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.context, incomingRoute, outgoingRoute, peer)
			},
		),

		m.registerFlow("HandleIBDBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockLocator(m.context, incomingRoute, outgoingRoute, peer)
			},
		),

		m.registerFlow("HandlePruningPointProofRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointProofRequests(m.context, incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func (m *Manager) registerPingFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
	outgoingRoute := router.OutgoingRoute()

	return []*flow{
		m.registerFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.ReceivePings(m.context, incomingRoute, outgoingRoute)
			},
		),

		m.registerFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.SendPings(m.context, incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
	outgoingRoute := router.OutgoingRoute()

	return []*flow{
		m.registerFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
			[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
			},
		),
		m.registerFlow("HandleRequestTransactions", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRequestedTransactions(m.context, incomingRoute, outgoingRoute)
			},
		),
	}
}

func (m *Manager) registerRejectsFlow(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
	outgoingRoute := router.OutgoingRoute()

	return []*flow{
		m.registerFlow("HandleRejects", router,
			[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return rejects.HandleRejects(m.context, incomingRoute, outgoingRoute)
			},
		),
	}
}

func (m *Manager) registerFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
	errChan chan error, initializeFunc flowInitializeFunc) *flow {

	route, err := router.AddIncomingRoute(name, messageTypes)
	if err != nil {
@@ -146,10 +300,9 @@ func (m *Manager) RegisterFlow(name string, router *routerpkg.Router, messageTyp
	return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
}

// RegisterFlowWithCapacity registers a flow to the given router with a custom capacity.
func (m *Manager) RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
func (m *Manager) registerFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
	messageTypes []appmessage.MessageCommand, isStopping *uint32,
	errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
	errChan chan error, initializeFunc flowInitializeFunc) *flow {

	route, err := router.AddIncomingRouteWithCapacity(name, capacity, messageTypes)
	if err != nil {
@@ -160,11 +313,11 @@ func (m *Manager) RegisterFlowWithCapacity(name string, capacity int, router *ro
}

func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32,
	errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
	errChan chan error, initializeFunc flowInitializeFunc) *flow {

	return &common.Flow{
		Name: name,
		ExecuteFunc: func(peer *peerpkg.Peer) {
	return &flow{
		name: name,
		executeFunc: func(peer *peerpkg.Peer) {
			err := initializeFunc(route, peer)
			if err != nil {
				m.context.HandleError(err, name, isStopping, errChan)
@@ -174,18 +327,17 @@ func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isSt
	}
}

// RegisterOneTimeFlow registers a one-time flow (that exits once some operations are done) to the given router.
func (m *Manager) RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
	isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
func (m *Manager) registerOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
	isStopping *uint32, stopChan chan error, initializeFunc flowInitializeFunc) *flow {

	route, err := router.AddIncomingRoute(name, messageTypes)
	if err != nil {
		panic(err)
	}

	return &common.Flow{
		Name: name,
		ExecuteFunc: func(peer *peerpkg.Peer) {
	return &flow{
		name: name,
		executeFunc: func(peer *peerpkg.Peer) {
			defer func() {
				err := router.RemoveRoute(messageTypes)
				if err != nil {
@@ -203,7 +355,7 @@ func (m *Manager) RegisterOneTimeFlow(name string, router *routerpkg.Router, mes
}

func registerHandshakeRoutes(router *routerpkg.Router) (
	receiveVersionRoute, sendVersionRoute, receiveReadyRoute *routerpkg.Route) {
	receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route) {
	receiveVersionRoute, err := router.AddIncomingRoute("recieveVersion - incoming", []appmessage.MessageCommand{appmessage.CmdVersion})
	if err != nil {
		panic(err)
@@ -214,16 +366,11 @@ func registerHandshakeRoutes(router *routerpkg.Router) (
		panic(err)
	}

	receiveReadyRoute, err = router.AddIncomingRoute("recieveReady - incoming", []appmessage.MessageCommand{appmessage.CmdReady})
	if err != nil {
		panic(err)
	}

	return receiveVersionRoute, sendVersionRoute, receiveReadyRoute
	return receiveVersionRoute, sendVersionRoute
}

func removeHandshakeRoutes(router *routerpkg.Router) {
	err := router.RemoveRoute([]appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck, appmessage.CmdReady})
	err := router.RemoveRoute([]appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck})
	if err != nil {
		panic(err)
	}
@@ -48,31 +48,12 @@ func NewManager(
}

// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
	defer onEnd()

	err := m.NotifyVirtualChange(virtualChangeSet)
	if err != nil {
		return err
	}

	rpcBlock := appmessage.DomainBlockToRPCBlock(block)
	err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
	if err != nil {
		return err
	}
	blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
	return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}

// NotifyVirtualChange notifies the manager that the virtual block has been changed.
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
	defer onEnd()

	if m.context.Config.UTXOIndex {
		err := m.notifyUTXOsChanged(virtualChangeSet)
		err := m.notifyUTXOsChanged(blockInsertionResult)
		if err != nil {
			return err
		}
@@ -88,19 +69,18 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
		return err
	}

	err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
	err = m.notifyVirtualSelectedParentChainChanged(blockInsertionResult)
	if err != nil {
		return err
	}

	return nil
}

// NotifyNewBlockTemplate notifies the manager that a new
// block template is available for miners
func (m *Manager) NotifyNewBlockTemplate() error {
	notification := appmessage.NewNewBlockTemplateNotificationMessage()
	return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
	rpcBlock := appmessage.DomainBlockToRPCBlock(block)
	err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
	if err != nil {
		return err
	}
	blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
	return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}

// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
@@ -137,11 +117,11 @@ func (m *Manager) NotifyFinalityConflictResolved(finalityBlockHash string) error
	return m.context.NotificationManager.NotifyFinalityConflictResolved(notification)
}

func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) notifyUTXOsChanged(blockInsertionResult *externalapi.BlockInsertionResult) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyUTXOsChanged")
	defer onEnd()

	utxoIndexChanges, err := m.context.UTXOIndex.Update(virtualChangeSet)
	utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult)
	if err != nil {
		return err
	}
@@ -191,12 +171,12 @@ func (m *Manager) notifyVirtualDaaScoreChanged() error {
	return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
}

func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) notifyVirtualSelectedParentChainChanged(blockInsertionResult *externalapi.BlockInsertionResult) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
	defer onEnd()

	notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
		virtualChangeSet.VirtualSelectedParentChainChanges)
		blockInsertionResult.VirtualSelectedParentChainChanges)
	if err != nil {
		return err
	}
@@ -28,7 +28,6 @@ var handlers = map[appmessage.MessageCommand]handler{
	appmessage.CmdGetVirtualSelectedParentChainFromBlockRequestMessage: rpchandlers.HandleGetVirtualSelectedParentChainFromBlock,
	appmessage.CmdGetBlocksRequestMessage:                              rpchandlers.HandleGetBlocks,
	appmessage.CmdGetBlockCountRequestMessage:                          rpchandlers.HandleGetBlockCount,
	appmessage.CmdGetBalanceByAddressRequestMessage:                    rpchandlers.HandleGetBalanceByAddress,
	appmessage.CmdGetBlockDAGInfoRequestMessage:                        rpchandlers.HandleGetBlockDAGInfo,
	appmessage.CmdResolveFinalityConflictRequestMessage:                rpchandlers.HandleResolveFinalityConflict,
	appmessage.CmdNotifyFinalityConflictsRequestMessage:                rpchandlers.HandleNotifyFinalityConflicts,
@@ -38,7 +37,6 @@ var handlers = map[appmessage.MessageCommand]handler{
	appmessage.CmdNotifyUTXOsChangedRequestMessage:                          rpchandlers.HandleNotifyUTXOsChanged,
	appmessage.CmdStopNotifyingUTXOsChangedRequestMessage:                   rpchandlers.HandleStopNotifyingUTXOsChanged,
	appmessage.CmdGetUTXOsByAddressesRequestMessage:                         rpchandlers.HandleGetUTXOsByAddresses,
	appmessage.CmdGetBalancesByAddressesRequestMessage:                      rpchandlers.HandleGetBalancesByAddresses,
	appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage:           rpchandlers.HandleGetVirtualSelectedParentBlueScore,
	appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
	appmessage.CmdBanRequestMessage:                                         rpchandlers.HandleBan,
@@ -48,7 +46,6 @@ var handlers = map[appmessage.MessageCommand]handler{
	appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
	appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage:           rpchandlers.HandleEstimateNetworkHashesPerSecond,
	appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage:             rpchandlers.HandleNotifyVirtualDaaScoreChanged,
	appmessage.CmdNotifyNewBlockTemplateRequestMessage:                   rpchandlers.HandleNotifyNewBlockTemplate,
}

func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
@@ -32,7 +32,6 @@ type NotificationListener struct {
	propagateVirtualSelectedParentBlueScoreChangedNotifications bool
	propagateVirtualDaaScoreChangedNotifications                bool
	propagatePruningPointUTXOSetOverrideNotifications           bool
	propagateNewBlockTemplateNotifications                      bool

	propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
}
@@ -202,25 +201,6 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
	return nil
}

// NotifyNewBlockTemplate notifies the notification manager that a new
// block template is available for miners
func (nm *NotificationManager) NotifyNewBlockTemplate(
	notification *appmessage.NewBlockTemplateNotificationMessage) error {

	nm.RLock()
	defer nm.RUnlock()

	for router, listener := range nm.listeners {
		if listener.propagateNewBlockTemplateNotifications {
			err := router.OutgoingRoute().Enqueue(notification)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// NotifyPruningPointUTXOSetOverride notifies the notification manager that the UTXO index
// was reset due to a pruning point change via IBD.
func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
@@ -246,7 +226,6 @@ func newNotificationListener() *NotificationListener {
		propagateFinalityConflictResolvedNotifications:              false,
		propagateUTXOsChangedNotifications:                          false,
		propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
		propagateNewBlockTemplateNotifications:                      false,
		propagatePruningPointUTXOSetOverrideNotifications:           false,
	}
}
@@ -355,12 +334,6 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
	nl.propagateVirtualDaaScoreChangedNotifications = true
}

// PropagateNewBlockTemplateNotifications instructs the listener to send
// new block template notifications to the remote listener
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
	nl.propagateNewBlockTemplateNotifications = true
}

// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
// to the remote listener.
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
@@ -56,29 +56,21 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
			"invalid block")
	}

	_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
	if err != nil {
		return err
	}

	isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
	_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
	if err != nil {
		return err
	}

	block.VerboseData = &appmessage.RPCBlockVerboseData{
		Hash:                blockHash.String(),
		Difficulty:          ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
		ChildrenHashes:      hashes.ToStrings(childrenHashes),
		IsHeaderOnly:        blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
		BlueScore:           blockInfo.BlueScore,
		MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
		MergeSetRedsHashes:  hashes.ToStrings(blockInfo.MergeSetReds),
		IsChainBlock:        isChainBlock,
		Hash:           blockHash.String(),
		Difficulty:     ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
		ChildrenHashes: hashes.ToStrings(childrenHashes),
		IsHeaderOnly:   blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
		BlueScore:      blockInfo.BlueScore,
	}
	// selectedParentHash will be nil in the genesis block
	if blockInfo.SelectedParent != nil {
		block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
	if selectedParentHash != nil {
		block.VerboseData.SelectedParentHash = selectedParentHash.String()
	}

	if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
@@ -1,57 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
)

// HandleGetBalanceByAddress handles the respectively named RPC command
func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	if !context.Config.UTXOIndex {
		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
		return errorMessage, nil
	}

	getBalanceByAddressRequest := request.(*appmessage.GetBalanceByAddressRequestMessage)

	balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
	if err != nil {
		rpcError := &appmessage.RPCError{}
		if !errors.As(err, rpcError) {
			return nil, err
		}
		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
		errorMessage.Error = rpcError
		return errorMessage, nil
	}

	response := appmessage.NewGetBalanceByAddressResponse(balance)
	return response, nil
}

func getBalanceByAddress(context *rpccontext.Context, addressString string) (uint64, error) {
	address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
	if err != nil {
		return 0, appmessage.RPCErrorf("Couldn't decode address '%s': %s", addressString, err)
	}

	scriptPublicKey, err := txscript.PayToAddrScript(address)
	if err != nil {
		return 0, appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
	}
	utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey)
	if err != nil {
		return 0, err
	}

	balance := uint64(0)
	for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs {
		balance += utxoOutpointEntryPair.Amount()
	}
	return balance, nil
}
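As the deleted handler above shows, an address balance is simply the sum of the amounts of its indexed UTXO entries. A standalone sketch of that final loop, with invented amounts:

package main

import "fmt"

func main() {
	utxoAmounts := []uint64{50_000, 125_000, 1_000} // hypothetical UTXO entry amounts
	balance := uint64(0)
	for _, amount := range utxoAmounts {
		balance += amount
	}
	fmt.Println("balance:", balance) // prints 176000
}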
@@ -1,41 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// HandleGetBalancesByAddresses handles the respectively named RPC command
func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	if !context.Config.UTXOIndex {
		errorMessage := &appmessage.GetBalancesByAddressesResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
		return errorMessage, nil
	}

	getBalancesByAddressesRequest := request.(*appmessage.GetBalancesByAddressesRequestMessage)

	allEntries := make([]*appmessage.BalancesByAddressesEntry, len(getBalancesByAddressesRequest.Addresses))
	for i, address := range getBalancesByAddressesRequest.Addresses {
		balance, err := getBalanceByAddress(context, address)

		if err != nil {
			rpcError := &appmessage.RPCError{}
			if !errors.As(err, rpcError) {
				return nil, err
			}
			errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
			errorMessage.Error = rpcError
			return errorMessage, nil
		}
		allEntries[i] = &appmessage.BalancesByAddressesEntry{
			Address: address,
			Balance: balance,
		}
	}

	response := appmessage.NewGetBalancesByAddressesResponse(allEntries)
	return response, nil
}
@@ -4,11 +4,9 @@ import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/version"
)

// HandleGetBlockTemplate handles the respectively named RPC command
@@ -17,7 +15,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque

	payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
	if err != nil {
		errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
		errorMessage := &appmessage.GetBlockResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
		return errorMessage, nil
	}
@@ -27,18 +25,12 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
		return nil, err
	}

	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}

	templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
	if err != nil {
		return nil, err
	}

	if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
		errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
		return errorMessage, nil
	}

	rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)

	isSynced, err := context.ProtocolManager.ShouldMine()
@@ -31,7 +31,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
	panic("implement me")
}

func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
func (d fakeDomain) InitStagingConsensus() error {
	panic("implement me")
}

@@ -1,19 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
	listener.PropagateNewBlockTemplateNotifications()

	response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
	return response, nil
}
@@ -1,7 +1,6 @@
package rpchandlers

import (
	"encoding/json"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
@@ -35,23 +34,6 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
		}, nil
	}

	if !submitBlockRequest.AllowNonDAABlocks {
		virtualDAAScore, err := context.Domain.Consensus().GetVirtualDAAScore()
		if err != nil {
			return nil, err
		}
		// A simple heuristic check which signals that the mined block is out of date
		// and should not be accepted unless the user explicitly requests it
		daaWindowSize := uint64(context.Config.NetParams().DifficultyAdjustmentWindowSize)
		if virtualDAAScore > daaWindowSize && domainBlock.Header.DAAScore() < virtualDAAScore-daaWindowSize {
			return &appmessage.SubmitBlockResponseMessage{
				Error: appmessage.RPCErrorf("Block rejected. Reason: block DAA score %d is too far "+
					"behind virtual's DAA score %d", domainBlock.Header.DAAScore(), virtualDAAScore),
				RejectReason: appmessage.RejectReasonBlockInvalid,
			}, nil
		}
	}

	err = context.ProtocolManager.AddBlock(domainBlock)
	if err != nil {
		isProtocolOrRuleError := errors.As(err, &ruleerrors.RuleError{}) || errors.As(err, &protocolerrors.ProtocolError{})
@@ -59,12 +41,6 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
			return nil, err
		}

		jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ")
		if jsonBytes != nil {
			log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+
				"the full header for debug purposes: \n%s", err, string(jsonBytes))
		}

		return &appmessage.SubmitBlockResponseMessage{
			Error:        appmessage.RPCErrorf("Block rejected. Reason: %s", err),
			RejectReason: appmessage.RejectReasonBlockInvalid,
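The staleness heuristic removed above rejects a submitted block whose DAA score lags the virtual's score by more than one difficulty-adjustment window. A standalone sketch of that comparison (the window size and scores below are illustrative):

package main

import "fmt"

func main() {
	const daaWindowSize = uint64(2_641) // illustrative window size
	virtualDAAScore := uint64(100_000)
	for _, blockDAAScore := range []uint64{99_900, 95_000} {
		tooOld := virtualDAAScore > daaWindowSize &&
			blockDAAScore < virtualDAAScore-daaWindowSize
		fmt.Printf("block DAA score %d -> rejected as outdated: %v\n", blockDAAScore, tooOld)
	}
}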
@@ -5,8 +5,9 @@ FLAGS=$@
go version

go get $FLAGS -t -d ./...
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint
go install $FLAGS honnef.co/go/tools/cmd/staticcheck@latest
# This is to bypass a go bug: https://github.com/golang/go/issues/27643
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint \
	honnef.co/go/tools/cmd/staticcheck

test -z "$(go fmt ./...)"

126
changelog.txt
@@ -1,129 +1,3 @@
Kaspad v0.12.0 - 2022-04-14
===========================
Breaking changes:
Hard-fork at DAA score 14687583 (estimated to be on 28/04 16:38 UTC) which includes:
* Using a separate depth (rather than the finality depth) for merge set calculations (#2013)
* Not counting the header size as part of the block mass (#2013)
* Increasing block version to 1 (#2013)
* Removing the limit on the amount of KAS that can be sent in one transaction (#2013)

Bug fixes:
* Add a workaround for the UTXO diff child bug (#2020)
* Use cosigner index 0 for read-only wallets (#2014)

Non-breaking changes:
* Add a "sweep" command to `kaspawallet` (#2018)
* Use `blue work` heuristic to skip irrelevant relay blocks
* Kaspawallet daemon: Add Send and Sign commands (#2016)

Kaspad v0.11.17 - 2022-04-06
===========================
* Decrement estimatedHeaderUpperBound from mempool's MaxBlockMass (#2009)

Kaspad v0.11.16 - 2022-04-05
===========================
* Don't skip wallet addresses with a different cosigner index (#2007)

Kaspad v0.11.15 - 2022-04-05
===========================
* Add support for auto-compound in `kaspawallet send` (#1951)
* Unite reachability stores (#1963, #1993, #2001)
* Add names to nameless routes (#1986)
* Optimize the miner-kaspad flow and latency (#1988)
* Upgrade to go 1.18 (#1992)
* Add package name to kaspawalletd .proto file (#1991)
* Block template cache (#1994)
* Add extra data to GetBlockTemplate request (#1995, #1997)
* New definition for "out of sync" (#1996)
* Remove v4 p2p version (#1998)
* Remove increase pagefile from deploy.yaml (#2000)
* Cache the pruning point anticone (#2002)
* Add DB compaction after the deletion of a DB prefix (#2003)
* Fix a bug in staging of the pruning point by index (#2005)
* Clean up debug log level by moving many frequent logs to trace level (#2004)

Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)

Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the UTXOs sorted by amount (#1947)
* Implement a `parse` subcommand in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared-past negotiation to be non-quadratic in the worst case as well (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Clean up log output mistakes and try to be clearer to the user (#1976, #1978)
* Apply the IBD-avoidance logic from patch10 to p2p v4 IBD handling (#1979)

Kaspad v0.11.11 - 2022-01-27
===========================
* Fix a rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)

Kaspad v0.11.10 - 2022-01-27
===========================
* Add monitoring of heap usage and save a heap profile if the size exceeds some limit (#1932)
* Extract IBD management from the invs relay flow to a new, separate flow (#1930)
* Add --transaction-file options to the `sign` and `broadcast` wallet subcommands (#1927)
* Filter redundant blocks from the DAA window on IBD (#1925)
* Implement a P2P upgrade mechanism (#1921)

Kaspad v0.11.9 - 2021-12-30
===========================
Breaking changes:
* Implement the new monetary policy. Breaking change effective only in ~4 months (#1892)

Bug fixes:
* Fix two pruning proof IBD crash bugs (#1913)
* Fix a UTXO index bug that showed a wrong wallet balance (#1891)

Non-breaking changes:
* Address search: clean up repeatedly-offline addresses and use randomization weighted by connection failures (#1899, #1916)
* New DNS seeders and removal of an offline one (#1901, #1910, #1918)
* Add a request-balance-by-address command to kaspactl (#1885)
* Wallet: show balance by addresses (#1904)
* Reject outdated non-DAA blocks submitted via RPC (#1914)
* Add a profile option to the kaspawallet daemon (#1854)

Kaspad v0.11.8 - 2021-12-13
===========================
Bug fixes:
* Update reindex root for each block level (#1881)

Non-breaking changes:
* Update readme (#1848)
* Lower devnet's initial difficulty (#1869)

Kaspad v0.11.7 - 2021-12-11
===========================
Breaking changes:
* kaspawallet: show-address → new-address + show-addresses (#1870)

Bug fixes:
* Fix numThreads using getAEAD instead of decryptMnemonic (#1859)
* Apply ResolveVirtual diffs to the UTXO index (#1868)

Non-breaking changes:
* Ignore header mass in devnet and testnet (#1879)
* Remove unused args from CalcSubsidy (#1877)
* ExpectedHeaderPruningPoint fix (#1876)
* Changes to libkaspawallet to support Kaspaper (#1878)
* Get rid of genesis's UTXO dump (#1867)

Kaspad v0.11.2 - 2021-11-11
===========================
Bug fixes:
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad

## Requirements

Go 1.18 or later.
Go 1.16 or later.

## Installation

@@ -3,7 +3,6 @@ package main

import (
    "reflect"
    "strconv"
    "strings"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/proto"
@@ -150,24 +149,12 @@ func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflec

        value = pointer.Interface()

    case reflect.Slice:
        sliceType := parameterDesc.typeof.Elem()
        if sliceType.Kind() != reflect.String {
            return reflect.Value{},
                errors.Errorf("Unsupported slice type '%s' for parameter '%s'",
                    sliceType,
                    parameterDesc.name)
        }
        if valueStr == "" {
            value = []string{}
        } else {
            value = strings.Split(valueStr, ",")
        }
    // Int and uint are not supported because their size is platform-dependent
    case reflect.Int,
        reflect.Uint,
        // Other types are not supported simply because they are not used in any command right now
        // but support can be added if and when needed
        reflect.Slice,
        reflect.Func,
        reflect.Interface,
        reflect.Map,
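For context, the `case reflect.Slice` branch above (removed on this branch) is what lets kaspactl accept a comma-separated value for string-slice parameters such as the addresses of `GetUtxosByAddresses`. A minimal, self-contained sketch of the same parsing idea — the names here are illustrative, not kaspactl's actual helpers:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// parseStringSlice mirrors the removed branch: it only accepts slices of
// strings, maps the empty input to an empty slice (rather than a one-element
// slice containing ""), and otherwise splits on commas.
func parseStringSlice(sliceType reflect.Type, valueStr string) (reflect.Value, error) {
	if sliceType.Elem().Kind() != reflect.String {
		return reflect.Value{}, fmt.Errorf("unsupported slice type '%s'", sliceType.Elem())
	}
	if valueStr == "" {
		return reflect.ValueOf([]string{}), nil
	}
	return reflect.ValueOf(strings.Split(valueStr, ",")), nil
}

func main() {
	value, err := parseStringSlice(reflect.TypeOf([]string{}), "kaspa:addr1,kaspa:addr2")
	if err != nil {
		panic(err)
	}
	fmt.Println(value.Interface()) // [kaspa:addr1 kaspa:addr2]
}
```

With a branch like this in place, a single comma-separated argument on the command line maps straight onto the repeated-field parameter of the request message.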
@@ -34,7 +34,6 @@ var commandTypes = []reflect.Type{
    reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),

    reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_GetBalanceByAddressRequest{}),

    reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),
@@ -12,12 +12,11 @@ var (
)

type configFlags struct {
    RPCServer                          string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
    Timeout                            uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
    RequestJSON                        string `short:"j" long:"json" description:"The request in JSON format"`
    ListCommands                       bool   `short:"l" long:"list-commands" description:"List all commands and exit"`
    AllowConnectionToDifferentVersions bool   `short:"a" long:"allow-connection-to-different-versions" description:"Allow connections to versions different than kaspactl's version"`
    CommandAndParameters               []string
    RPCServer            string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
    Timeout              uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
    RequestJSON          string `short:"j" long:"json" description:"The request in JSON format"`
    ListCommands         bool   `short:"l" long:"list-commands" description:"List all commands and exit"`
    CommandAndParameters []string
    config.NetworkFlags
}

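The struct above doubles as the CLI definition: kaspad's tooling declares flags with `short`/`long`/`description` struct tags in the style of the jessevdk/go-flags package. A minimal sketch of how such a struct is parsed (a trimmed-down, hypothetical subset, assuming go-flags):

```go
package main

import (
	"fmt"
	"os"

	"github.com/jessevdk/go-flags"
)

// options is a trimmed-down subset of configFlags above; go-flags reads the
// struct tags to build the command-line interface.
type options struct {
	RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	Timeout   uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
}

func main() {
	opts := &options{}
	// Leftover arguments (the command and its parameters) are returned as-is.
	remaining, err := flags.NewParser(opts, flags.Default).ParseArgs([]string{"-s", "localhost:16110", "GetInfo"})
	if err != nil {
		os.Exit(1)
	}
	fmt.Println(opts.RPCServer, remaining) // localhost:16110 [GetInfo]
}
```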
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.18-alpine AS build
FROM golang:1.16-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

@@ -34,18 +34,16 @@ func main() {
    }
    defer client.Disconnect()

    if !cfg.AllowConnectionToDifferentVersions {
        kaspadMessage, err := client.Post(&protowire.KaspadMessage{Payload: &protowire.KaspadMessage_GetInfoRequest{GetInfoRequest: &protowire.GetInfoRequestMessage{}}})
        if err != nil {
            printErrorAndExit(fmt.Sprintf("Cannot post GetInfo message: %s", err))
        }
    kaspadMessage, err := client.Post(&protowire.KaspadMessage{Payload: &protowire.KaspadMessage_GetInfoRequest{GetInfoRequest: &protowire.GetInfoRequestMessage{}}})
    if err != nil {
        printErrorAndExit(fmt.Sprintf("Cannot post GetInfo message: %s", err))
    }

        localVersion := version.Version()
        remoteVersion := kaspadMessage.GetGetInfoResponse().ServerVersion
    localVersion := version.Version()
    remoteVersion := kaspadMessage.GetGetInfoResponse().ServerVersion

        if localVersion != remoteVersion {
            printErrorAndExit(fmt.Sprintf("Server version mismatch, expect: %s, got: %s", localVersion, remoteVersion))
        }
    if localVersion != remoteVersion {
        printErrorAndExit(fmt.Sprintf("Server version mismatch, expect: %s, got: %s", localVersion, remoteVersion))
    }

    responseChan := make(chan string)

@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad

## Requirements

Go 1.18 or later.
Go 1.16 or later.

## Installation

@@ -13,8 +13,8 @@ const minerTimeout = 10 * time.Second
type minerClient struct {
    *rpcclient.RPCClient

    cfg                              *configFlags
    newBlockTemplateNotificationChan chan struct{}
    cfg                        *configFlags
    blockAddedNotificationChan chan struct{}
}

func (mc *minerClient) connect() error {
@@ -30,14 +30,14 @@ func (mc *minerClient) connect() error {
    mc.SetTimeout(minerTimeout)
    mc.SetLogger(backendLog, logger.LevelTrace)

    err = mc.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) {
    err = mc.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
        select {
        case mc.newBlockTemplateNotificationChan <- struct{}{}:
        case mc.blockAddedNotificationChan <- struct{}{}:
        default:
        }
    })
    if err != nil {
        return errors.Wrapf(err, "error requesting new-block-template notifications")
        return errors.Wrapf(err, "error requesting block-added notifications")
    }

    log.Infof("Connected to %s", rpcAddress)
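Both registration variants above funnel notifications into the same non-blocking send: if no receiver is ready, the signal is dropped, so bursts of notifications coalesce into a single pending wakeup and the RPC callback never blocks. The pattern in isolation (a generic sketch, not kaspad API):

```go
package main

import "fmt"

// notify performs a non-blocking send: if the channel cannot accept the
// signal right now, the notification is dropped instead of blocking the
// caller. Dropped signals are fine here because one pending wakeup already
// guarantees the consumer will run.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1)
	notify(ch) // signal stored
	notify(ch) // coalesced: dropped, one wakeup still pending
	<-ch
	fmt.Println("woke up once for two notifications")
}
```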
@@ -47,8 +47,8 @@ func (mc *minerClient) connect() error {

func newMinerClient(cfg *configFlags) (*minerClient, error) {
    minerClient := &minerClient{
        cfg:                              cfg,
        newBlockTemplateNotificationChan: make(chan struct{}),
        cfg:                        cfg,
        blockAddedNotificationChan: make(chan struct{}),
    }

    err := minerClient.connect()

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.18-alpine AS build
FROM golang:1.16-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

@@ -2,7 +2,6 @@ package main

import (
    nativeerrors "errors"
    "github.com/kaspanet/kaspad/version"
    "math/rand"
    "sync/atomic"
    "time"
@@ -188,7 +187,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S

func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) {
    getBlockTemplate := func() {
        template, err := client.GetBlockTemplate(miningAddr.String(), "kaspaminer-"+version.Version())
        template, err := client.GetBlockTemplate(miningAddr.String())
        if nativeerrors.Is(err, router.ErrTimeout) {
            log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
            reconnectErr := client.Reconnect()
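The timeout branch above reconnects and retries rather than treating a slow response as fatal. The same idea as a generic helper (a sketch under assumed names; `op` and `reconnect` stand in for the rpcclient calls used above):

```go
package main

import (
	"errors"
	"fmt"
)

var errTimeout = errors.New("timeout")

// retryOnTimeout runs op; on a timeout error it reconnects and tries again,
// up to maxAttempts. Any non-timeout error is returned immediately.
func retryOnTimeout(op func() error, reconnect func() error, maxAttempts int) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		err = op()
		if err == nil || !errors.Is(err, errTimeout) {
			return err
		}
		if reconnectErr := reconnect(); reconnectErr != nil {
			return reconnectErr
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retryOnTimeout(
		func() error {
			attempts++
			if attempts < 2 {
				return errTimeout // first attempt times out
			}
			return nil
		},
		func() error { return nil }, // pretend the reconnect succeeds
		3,
	)
	fmt.Println(attempts, err) // 2 <nil>
}
```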
@@ -218,7 +217,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan er
    ticker := time.NewTicker(tickerTime)
    for {
        select {
        case <-client.newBlockTemplateNotificationChan:
        case <-client.blockAddedNotificationChan:
            getBlockTemplate()
            ticker.Reset(tickerTime)
        case <-ticker.C:

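The loop above combines push and poll: a notification triggers an immediate template refresh and resets the ticker (so a poll doesn't fire redundantly right after), while the ticker guarantees periodic refreshes even if notifications stop arriving. The structure in isolation (a generic sketch, not the kaspaminer code itself):

```go
package main

import (
	"fmt"
	"time"
)

// refreshLoop refreshes immediately on each push notification and falls back
// to a fixed-interval poll otherwise. Resetting the ticker after a push
// prevents a redundant poll from firing right after a notified refresh.
func refreshLoop(notifications <-chan struct{}, interval time.Duration, refresh func(), quit <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-notifications:
			refresh()
			ticker.Reset(interval)
		case <-ticker.C:
			refresh()
		case <-quit:
			return
		}
	}
}

func main() {
	notifications := make(chan struct{}, 1)
	quit := make(chan struct{})
	go refreshLoop(notifications, 50*time.Millisecond, func() { fmt.Println("refresh") }, quit)
	notifications <- struct{}{}        // push-triggered refresh
	time.Sleep(120 * time.Millisecond) // then ~two poll-triggered refreshes
	close(quit)
}
```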
Some files were not shown because too many files have changed in this diff.