Compare commits

master..v0.11.14-rc1

No commits in common. "master" and "v0.11.14-rc1" have entirely different histories.

415 changed files with 12242 additions and 21253 deletions


@@ -1,7 +1,7 @@
 name: Build and upload assets
 on:
   release:
-    types: [published]
+    types: [ published ]
 jobs:
   build:
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ ubuntu-latest, windows-latest, macos-latest ]
     name: Building, ${{ matrix.os }}
     steps:
       - name: Fix CRLF on Windows
@@ -17,12 +17,18 @@ jobs:
         run: git config --global core.autocrlf false
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
+      # Increase the pagefile size on Windows to aviod running out of memory
+      - name: Increase pagefile size on Windows
+        if: runner.os == 'Windows'
+        run: powershell -command .github\workflows\SetPageFileSize.ps1
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.21
+          go-version: 1.16
       - name: Build on Linux
         if: runner.os == 'Linux'
@@ -30,7 +36,7 @@ jobs:
         # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
         # `-s -w` strips the binary to produce smaller size binaries
         run: |
-          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
+          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
           archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
           asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
           zip -r "${archive}" ./bin/*
@@ -41,7 +47,7 @@ jobs:
         if: runner.os == 'Windows'
         shell: bash
         run: |
-          go build -v -ldflags="-s -w" -o bin/ ./cmd/...
+          go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
          powershell "Compress-Archive bin/* \"${archive}\""
@@ -51,13 +57,14 @@ jobs:
       - name: Build on MacOS
         if: runner.os == 'macOS'
         run: |
-          go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
+          go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
           archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
           asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
           zip -r "${archive}" ./bin/*
           echo "archive=${archive}" >> $GITHUB_ENV
           echo "asset_name=${asset_name}" >> $GITHUB_ENV
       - name: Upload release asset
         uses: actions/upload-release-asset@v1
         env:


@@ -11,18 +11,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        branch: [master, latest]
+        branch: [ master, latest ]
     name: Race detection on ${{ matrix.branch }}
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       - name: Set scheduled branch name
         shell: bash


@@ -8,20 +8,22 @@ on:
     types: [opened, synchronize, edited]
 jobs:
   build:
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, macos-latest]
+        os: [ ubuntu-latest, macos-latest ]
     name: Tests, ${{ matrix.os }}
     steps:
       - name: Fix CRLF on Windows
         if: runner.os == 'Windows'
         run: git config --global core.autocrlf false
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       # Increase the pagefile size on Windows to aviod running out of memory
       - name: Increase pagefile size on Windows
@@ -29,13 +31,14 @@ jobs:
         run: powershell -command .github\workflows\SetPageFileSize.ps1
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
       - name: Go Cache
-        uses: actions/cache@v4
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -46,17 +49,19 @@ jobs:
         shell: bash
         run: ./build_and_test.sh -v
   stability-test-fast:
     runs-on: ubuntu-latest
     name: Fast stability tests, ${{ github.head_ref }}
     steps:
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
@@ -70,17 +75,18 @@ jobs:
         working-directory: stability-tests
         run: ./install_and_test.sh
   coverage:
     runs-on: ubuntu-latest
     name: Produce code coverage
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       - name: Delete the stability tests from coverage
         run: rm -r stability-tests

.gitignore

@@ -53,7 +53,6 @@ _testmain.go
 debug
 debug.test
 __debug_bin
-*__debug_*
 # CI
 version.txt


@@ -1,43 +0,0 @@
-# Contributor Covenant Code of Conduct
-## Our Pledge
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-## Our Standards
-Examples of behavior that contributes to creating a positive environment include:
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-## Our Responsibilities
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-## Enforcement
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.
-## Attribution
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/


@@ -1,15 +1,13 @@
-# DEPRECATED
-The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
+Kaspad
+====
-PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
-# Kaspad
 [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
 [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
-Kaspad was the reference full node Kaspa implementation written in Go (golang).
+Kaspad is the reference full node Kaspa implementation written in Go (golang).
+This project is currently under active development and is in Beta state.
 ## What is kaspa
@@ -17,7 +15,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
 ## Requirements
-Go 1.23 or later.
+Go 1.16 or later.
 ## Installation
@@ -44,6 +42,7 @@ $ go install . ./cmd/...
 not already add the bin directory to your system path during Go installation,
 you are encouraged to do so now.
 ## Getting Started
 Kaspad has several configuration options available to tweak how it runs, but all
@@ -54,7 +53,6 @@ $ kaspad
 ```
 ## Discord
 Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
 ## Issue Tracker


@@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
 with the specifics of message handling such as what to do when a message is
 received. This provides the caller with a high level of flexibility.
-# Kaspa Message Overview
+Kaspa Message Overview
 The kaspa protocol consists of exchanging messages between peers. Each
 message is preceded by a header which identifies information about it such as
@@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
 appmessage using kaspa encoding are handled so the caller doesn't have to concern
 themselves with the specifics.
-# Message Interaction
+Message Interaction
 The following provides a quick summary of how the kaspa messages are intended
 to interact with one another. As stated above, these interactions are not
@@ -45,13 +45,13 @@ interactions in no particular order.
 	notfound message (MsgNotFound)
 	ping message (MsgPing)	pong message (MsgPong)
-# Common Parameters
+Common Parameters
 There are several common parameters that arise when using this package to read
 and write kaspa messages. The following sections provide a quick overview of
 these parameters so the next sections can build on them.
-# Protocol Version
+Protocol Version
 The protocol version should be negotiated with the remote peer at a higher
 level than this package via the version (MsgVersion) message exchange, however,
@@ -60,7 +60,7 @@ latest protocol version this package supports and is typically the value to use
 for all outbound connections before a potentially lower protocol version is
 negotiated.
-# Kaspa Network
+Kaspa Network
 The kaspa network is a magic number which is used to identify the start of a
 message and which kaspa network the message applies to. This package provides
@@ -71,7 +71,7 @@ the following constants:
 	appmessage.Simnet (Simulation test network)
 	appmessage.Devnet (Development network)
-# Determining Message Type
+Determining Message Type
 As discussed in the kaspa message overview section, this package reads
 and writes kaspa messages using a generic interface named Message. In
@@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
 		fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
 	}
-# Reading Messages
+Reading Messages
 In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
 function. It accepts any io.Reader, but typically this will be a net.Conn to
@@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
 		// Log and handle the error
 	}
-# Writing Messages
+Writing Messages
 In order to marshall kaspa messages to the appmessage, use the WriteMessage
 function. It accepts any io.Writer, but typically this will be a net.Conn to
@@ -122,7 +122,7 @@ from a remote peer is:
 		// Log and handle the error
 	}
-# Errors
+Errors
 Errors returned by this package are either the raw errors provided by underlying
 calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
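For orientation, here is a minimal, self-contained sketch of the type-switch pattern this package documentation describes. The MsgPing.Nonce and MsgBlock.Transactions fields are assumptions based on the package's btcd-style message types, not quoted from this diff:

```go
package example

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// handleMessage dispatches on the concrete message type behind the generic
// Message interface, as described in the documentation above.
func handleMessage(msg appmessage.Message) {
	switch msg := msg.(type) {
	case *appmessage.MsgPing:
		fmt.Printf("ping, nonce %d\n", msg.Nonce) // Nonce field is an assumption
	case *appmessage.MsgBlock:
		fmt.Printf("block with %d transactions\n", len(msg.Transactions)) // Transactions field is an assumption
	default:
		fmt.Printf("message command: %d\n", msg.Command())
	}
}
```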


@@ -2,9 +2,8 @@ package appmessage
 import (
 	"encoding/hex"
-	"math/big"
 	"github.com/pkg/errors"
+	"math/big"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
@@ -219,8 +218,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
 		Outputs:      outputs,
 		LockTime:     rpcTransaction.LockTime,
 		SubnetworkID: *subnetworkID,
-		Gas:            rpcTransaction.Gas,
-		MassCommitment: rpcTransaction.Mass,
+		Gas:          rpcTransaction.LockTime,
 		Payload:      payload,
 	}, nil
 }
@@ -288,8 +286,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
 		Outputs:      outputs,
 		LockTime:     transaction.LockTime,
 		SubnetworkID: subnetworkID,
-		Gas:          transaction.Gas,
-		Mass:         transaction.MassCommitment,
+		Gas:          transaction.LockTime,
 		Payload:      payload,
 	}
 }


@@ -156,17 +156,6 @@ const (
 	CmdVirtualDaaScoreChangedNotificationMessage
 	CmdGetBalancesByAddressesRequestMessage
 	CmdGetBalancesByAddressesResponseMessage
-	CmdNotifyNewBlockTemplateRequestMessage
-	CmdNotifyNewBlockTemplateResponseMessage
-	CmdNewBlockTemplateNotificationMessage
-	CmdGetMempoolEntriesByAddressesRequestMessage
-	CmdGetMempoolEntriesByAddressesResponseMessage
-	CmdGetCoinSupplyRequestMessage
-	CmdGetCoinSupplyResponseMessage
-	CmdGetFeeEstimateRequestMessage
-	CmdGetFeeEstimateResponseMessage
-	CmdSubmitTransactionReplacementRequestMessage
-	CmdSubmitTransactionReplacementResponseMessage
 )
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -297,17 +286,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
 	CmdGetBalancesByAddressesRequestMessage:      "GetBalancesByAddressesRequest",
 	CmdGetBalancesByAddressesResponseMessage:     "GetBalancesByAddressesResponse",
-	CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
-	CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
-	CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
-	CmdGetMempoolEntriesByAddressesRequestMessage:  "GetMempoolEntriesByAddressesRequest",
-	CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
-	CmdGetCoinSupplyRequestMessage:                 "GetCoinSupplyRequest",
-	CmdGetCoinSupplyResponseMessage:                "GetCoinSupplyResponse",
-	CmdGetFeeEstimateRequestMessage:                "GetFeeEstimateRequest",
-	CmdGetFeeEstimateResponseMessage:               "GetFeeEstimateResponse",
-	CmdSubmitTransactionReplacementRequestMessage:  "SubmitTransactionReplacementRequest",
-	CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
 }
 // Message is an interface that describes a kaspa message. A type that


@@ -132,7 +132,7 @@ func TestConvertToPartial(t *testing.T) {
 	}
 }
-// blockOne is the first block in the mainnet block DAG.
+//blockOne is the first block in the mainnet block DAG.
 var blockOne = MsgBlock{
 	Header: MsgBlockHeader{
 		Version: 0,


@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
 // TestTxHash tests the ability to generate the hash of a transaction accurately.
 func TestTxHashAndID(t *testing.T) {
-	txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
-	txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
+	txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
+	txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
 	wantTxID1, err := transactionid.FromString(txID1Str)
 	if err != nil {
 		t.Fatalf("NewTxIDFromStr: %v", err)
@@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
 			spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
 	}
-	hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
+	hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
 	wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
 	if err != nil {
 		t.Errorf("NewTxIDFromStr: %v", err)


@@ -1,47 +0,0 @@
-package appmessage
-// GetFeeEstimateRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type GetFeeEstimateRequestMessage struct {
-	baseMessage
-}
-// Command returns the protocol command string for the message
-func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
-	return CmdGetFeeEstimateRequestMessage
-}
-// NewGetFeeEstimateRequestMessage returns a instance of the message
-func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
-	return &GetFeeEstimateRequestMessage{}
-}
-type RPCFeeRateBucket struct {
-	Feerate          float64
-	EstimatedSeconds float64
-}
-type RPCFeeEstimate struct {
-	PriorityBucket RPCFeeRateBucket
-	NormalBuckets  []RPCFeeRateBucket
-	LowBuckets     []RPCFeeRateBucket
-}
-// GetCoinSupplyResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type GetFeeEstimateResponseMessage struct {
-	baseMessage
-	Estimate RPCFeeEstimate
-	Error *RPCError
-}
-// Command returns the protocol command string for the message
-func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
-	return CmdGetFeeEstimateResponseMessage
-}
-// NewGetFeeEstimateResponseMessage returns a instance of the message
-func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
-	return &GetFeeEstimateResponseMessage{}
-}


@@ -5,7 +5,6 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
 	baseMessage
 	PayAddress string
-	ExtraData  string
 }
 // Command returns the protocol command string for the message
@@ -14,10 +13,9 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }
 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
 	return &GetBlockTemplateRequestMessage{
 		PayAddress: payAddress,
-		ExtraData:  extraData,
 	}
 }


@@ -1,40 +0,0 @@
-package appmessage
-// GetCoinSupplyRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type GetCoinSupplyRequestMessage struct {
-	baseMessage
-}
-// Command returns the protocol command string for the message
-func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
-	return CmdGetCoinSupplyRequestMessage
-}
-// NewGetCoinSupplyRequestMessage returns a instance of the message
-func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
-	return &GetCoinSupplyRequestMessage{}
-}
-// GetCoinSupplyResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type GetCoinSupplyResponseMessage struct {
-	baseMessage
-	MaxSompi         uint64
-	CirculatingSompi uint64
-	Error            *RPCError
-}
-// Command returns the protocol command string for the message
-func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
-	return CmdGetCoinSupplyResponseMessage
-}
-// NewGetCoinSupplyResponseMessage returns a instance of the message
-func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
-	return &GetCoinSupplyResponseMessage{
-		MaxSompi:         maxSompi,
-		CirculatingSompi: circulatingSompi,
-	}
-}


@@ -23,8 +23,6 @@ type GetInfoResponseMessage struct {
 	P2PID         string
 	MempoolSize   uint64
 	ServerVersion string
-	IsUtxoIndexed bool
-	IsSynced      bool
 	Error *RPCError
 }
@@ -35,12 +33,10 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
 }
 // NewGetInfoResponseMessage returns a instance of the message
-func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
+func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
 	return &GetInfoResponseMessage{
 		P2PID:         p2pID,
 		MempoolSize:   mempoolSize,
 		ServerVersion: serverVersion,
-		IsUtxoIndexed: isUtxoIndexed,
-		IsSynced:      isSynced,
 	}
 }


@@ -4,8 +4,6 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntriesRequestMessage struct {
 	baseMessage
-	IncludeOrphanPool     bool
-	FilterTransactionPool bool
 }
 // Command returns the protocol command string for the message
@@ -14,11 +12,8 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
 }
 // NewGetMempoolEntriesRequestMessage returns a instance of the message
-func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
-	return &GetMempoolEntriesRequestMessage{
-		IncludeOrphanPool:     includeOrphanPool,
-		FilterTransactionPool: filterTransactionPool,
-	}
+func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
+	return &GetMempoolEntriesRequestMessage{}
 }
 // GetMempoolEntriesResponseMessage is an appmessage corresponding to


@@ -1,52 +0,0 @@
-package appmessage
-// MempoolEntryByAddress represents MempoolEntries associated with some address
-type MempoolEntryByAddress struct {
-	Address   string
-	Receiving []*MempoolEntry
-	Sending   []*MempoolEntry
-}
-// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type GetMempoolEntriesByAddressesRequestMessage struct {
-	baseMessage
-	Addresses             []string
-	IncludeOrphanPool     bool
-	FilterTransactionPool bool
-}
-// Command returns the protocol command string for the message
-func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
-	return CmdGetMempoolEntriesByAddressesRequestMessage
-}
-// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
-func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
-	return &GetMempoolEntriesByAddressesRequestMessage{
-		Addresses:             addresses,
-		IncludeOrphanPool:     includeOrphanPool,
-		FilterTransactionPool: filterTransactionPool,
-	}
-}
-// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type GetMempoolEntriesByAddressesResponseMessage struct {
-	baseMessage
-	Entries []*MempoolEntryByAddress
-	Error *RPCError
-}
-// Command returns the protocol command string for the message
-func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
-	return CmdGetMempoolEntriesByAddressesResponseMessage
-}
-// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
-func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
-	return &GetMempoolEntriesByAddressesResponseMessage{
-		Entries: entries,
-	}
-}


@@ -5,8 +5,6 @@ package appmessage
 type GetMempoolEntryRequestMessage struct {
 	baseMessage
 	TxID string
-	IncludeOrphanPool     bool
-	FilterTransactionPool bool
 }
 // Command returns the protocol command string for the message
@@ -15,12 +13,8 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
 }
 // NewGetMempoolEntryRequestMessage returns a instance of the message
-func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
-	return &GetMempoolEntryRequestMessage{
-		TxID: txID,
-		IncludeOrphanPool:     includeOrphanPool,
-		FilterTransactionPool: filterTransactionPool,
-	}
+func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
+	return &GetMempoolEntryRequestMessage{TxID: txID}
 }
 // GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -36,7 +30,6 @@ type GetMempoolEntryResponseMessage struct {
 type MempoolEntry struct {
 	Fee         uint64
 	Transaction *RPCTransaction
-	IsOrphan    bool
 }
 // Command returns the protocol command string for the message
@@ -45,12 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
 }
 // NewGetMempoolEntryResponseMessage returns a instance of the message
-func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
+func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
 	return &GetMempoolEntryResponseMessage{
 		Entry: &MempoolEntry{
 			Fee:         fee,
 			Transaction: transaction,
-			IsOrphan:    isOrphan,
 		},
 	}
 }


@@ -5,7 +5,6 @@ package appmessage
 type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
 	baseMessage
 	StartHash string
-	IncludeAcceptedTransactionIDs bool
 }
 // Command returns the protocol command string for the message
@@ -14,29 +13,18 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
 }
 // NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
-func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
-	startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
 	return &GetVirtualSelectedParentChainFromBlockRequestMessage{
 		StartHash: startHash,
-		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
 	}
 }
-// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
-// VirtualSelectedParentChainChangedNotificationMessage appmessages
-type AcceptedTransactionIDs struct {
-	AcceptingBlockHash     string
-	AcceptedTransactionIDs []string
-}
 // GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
-	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 	Error *RPCError
 }
@@ -48,11 +36,10 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
 // NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
 func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
-	addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
+	addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
 	return &GetVirtualSelectedParentChainFromBlockResponseMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlockHashes,
-		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }


@@ -1,50 +0,0 @@
-package appmessage
-// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type NotifyNewBlockTemplateRequestMessage struct {
-	baseMessage
-}
-// Command returns the protocol command string for the message
-func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
-	return CmdNotifyNewBlockTemplateRequestMessage
-}
-// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
-func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
-	return &NotifyNewBlockTemplateRequestMessage{}
-}
-// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type NotifyNewBlockTemplateResponseMessage struct {
-	baseMessage
-	Error *RPCError
-}
-// Command returns the protocol command string for the message
-func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
-	return CmdNotifyNewBlockTemplateResponseMessage
-}
-// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
-func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
-	return &NotifyNewBlockTemplateResponseMessage{}
-}
-// NewBlockTemplateNotificationMessage is an appmessage corresponding to
-// its respective RPC message
-type NewBlockTemplateNotificationMessage struct {
-	baseMessage
-}
-// Command returns the protocol command string for the message
-func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
-	return CmdNewBlockTemplateNotificationMessage
-}
-// NewNewBlockTemplateNotificationMessage returns an instance of the message
-func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
-	return &NewBlockTemplateNotificationMessage{}
-}


@@ -4,7 +4,6 @@ package appmessage
 // its respective RPC message
 type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
 	baseMessage
-	IncludeAcceptedTransactionIDs bool
 }
 // Command returns the protocol command string for the message
@@ -12,13 +11,9 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
 	return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
 }
-// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
-func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
-	includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
-	return &NotifyVirtualSelectedParentChainChangedRequestMessage{
-		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
-	}
+// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
+func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
+	return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
 }
 // NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@@ -44,7 +39,6 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
-	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 }
 // Command returns the protocol command string for the message
@@ -54,11 +48,10 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
 // NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
 func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
-	addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
+	addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
 	return &VirtualSelectedParentChainChangedNotificationMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlocks,
-		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }


@@ -52,7 +52,6 @@ type RPCTransaction struct {
 	SubnetworkID string
 	Gas          uint64
 	Payload      string
-	Mass         uint64
 	VerboseData *RPCTransactionVerboseData
 }


@@ -1,42 +0,0 @@
-package appmessage
-// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
-// its respective RPC message
-type SubmitTransactionReplacementRequestMessage struct {
-	baseMessage
-	Transaction *RPCTransaction
-}
-// Command returns the protocol command string for the message
-func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
-	return CmdSubmitTransactionReplacementRequestMessage
-}
-// NewSubmitTransactionReplacementRequestMessage returns a instance of the message
-func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
-	return &SubmitTransactionReplacementRequestMessage{
-		Transaction: transaction,
-	}
-}
-// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
-// its respective RPC message
-type SubmitTransactionReplacementResponseMessage struct {
-	baseMessage
-	TransactionID       string
-	ReplacedTransaction *RPCTransaction
-	Error *RPCError
-}
-// Command returns the protocol command string for the message
-func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
-	return CmdSubmitTransactionReplacementResponseMessage
-}
-// NewSubmitTransactionReplacementResponseMessage returns a instance of the message
-func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
-	return &SubmitTransactionReplacementResponseMessage{
-		TransactionID: transactionID,
-	}
-}


@@ -4,8 +4,6 @@ import (
 	"fmt"
 	"sync/atomic"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
 	"github.com/kaspanet/kaspad/app/protocol"
@@ -69,7 +67,6 @@ func (a *ComponentManager) Stop() {
 	}
 	a.protocolManager.Close()
-	close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
 	return
 }
@@ -121,7 +118,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
 	if err != nil {
 		return nil, err
 	}
-	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
+	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
 	return &ComponentManager{
 		cfg: cfg,
@@ -142,7 +139,6 @@ func setupRPC(
 	connectionManager *connmanager.ConnectionManager,
 	addressManager *addressmanager.AddressManager,
 	utxoIndex *utxoindex.UTXOIndex,
-	consensusEventsChan chan externalapi.ConsensusEvent,
 	shutDownChan chan<- struct{},
 ) *rpc.Manager {
@@ -154,10 +150,10 @@ func setupRPC(
 		connectionManager,
 		addressManager,
 		utxoIndex,
-		consensusEventsChan,
 		shutDownChan,
 	)
-	protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
+	protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
+	protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
 	protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
 	return rpcManager


@@ -16,42 +16,53 @@ import (
 // OnNewBlock updates the mempool after a new block arrival, and
 // relays newly unorphaned transactions and possibly rebroadcast
 // manually added transactions when not in IBD.
-func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
+func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
+	virtualChangeSet *externalapi.VirtualChangeSet) error {
 	hash := consensushashing.BlockHash(block)
-	log.Tracef("OnNewBlock start for block %s", hash)
-	defer log.Tracef("OnNewBlock end for block %s", hash)
+	log.Debugf("OnNewBlock start for block %s", hash)
+	defer log.Debugf("OnNewBlock end for block %s", hash)
-	unorphanedBlocks, err := f.UnorphanBlocks(block)
+	unorphaningResults, err := f.UnorphanBlocks(block)
 	if err != nil {
 		return err
 	}
-	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
+	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
 	newBlocks := []*externalapi.DomainBlock{block}
-	newBlocks = append(newBlocks, unorphanedBlocks...)
+	newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
+	for _, unorphaningResult := range unorphaningResults {
+		newBlocks = append(newBlocks, unorphaningResult.block)
+		newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
+	}
 	allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
-	for _, newBlock := range newBlocks {
+	for i, newBlock := range newBlocks {
 		log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
 		acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
 		if err != nil {
 			return err
 		}
 		allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
+		if f.onBlockAddedToDAGHandler != nil {
+			log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
+			virtualChangeSet = newVirtualChangeSets[i]
+			err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
+			if err != nil {
+				return err
+			}
+		}
 	}
 	return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
 }
-// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
-func (f *FlowContext) OnNewBlockTemplate() error {
-	// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
-	// state consistent without dependency on external event registration
-	f.Domain().MiningManager().ClearBlockTemplate()
-	if f.onNewBlockTemplateHandler != nil {
-		return f.onNewBlockTemplateHandler()
+// OnVirtualChange calls the handler function whenever the virtual block changes.
+func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
+	if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
+		return f.onVirtualChangeHandler(virtualChangeSet)
 	}
 	return nil
@@ -107,18 +118,14 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
 		return protocolerrors.Errorf(false, "cannot add header only block")
 	}
-	err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
+	virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
 		}
 		return err
 	}
-	err = f.OnNewBlockTemplate()
-	if err != nil {
-		return err
-	}
-	err = f.OnNewBlock(block)
+	err = f.OnNewBlock(block, virtualChangeSet)
 	if err != nil {
 		return err
 	}


@@ -18,8 +18,12 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
 )
-// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
-type OnNewBlockTemplateHandler func() error
+// OnBlockAddedToDAGHandler is a handler function that's triggered
+// when a block is added to the DAG
+type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
+// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
+type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
 // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
 // resets due to pruning point change via IBD.
@@ -40,7 +44,8 @@ type FlowContext struct {
 	timeStarted int64
-	onNewBlockTemplateHandler            OnNewBlockTemplateHandler
+	onVirtualChangeHandler               OnVirtualChangeHandler
+	onBlockAddedToDAGHandler             OnBlockAddedToDAGHandler
 	onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
 	onTransactionAddedToMempoolHandler   OnTransactionAddedToMempoolHandler
@@ -97,14 +102,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
 	return f.shutdownChan
 }
-// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
-func (f *FlowContext) IsNearlySynced() (bool, error) {
-	return f.Domain().Consensus().IsNearlySynced()
+// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
+func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
+	f.onVirtualChangeHandler = onVirtualChangeHandler
 }
-// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
-func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
-	f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
+// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
+func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
+	f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
 }
 // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
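To see how the two handler types plug together, here is a short, hypothetical wiring sketch against the v0.11.14-rc1 API shown above (the setter names come from this diff; the handler bodies are placeholders):

```go
package example

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// wireHandlers registers the two callbacks added in v0.11.14-rc1.
func wireHandlers(f *flowcontext.FlowContext) {
	f.SetOnVirtualChangeHandler(func(virtualChangeSet *externalapi.VirtualChangeSet) error {
		// e.g. forward virtual-chain updates to RPC notification listeners
		return nil
	})
	f.SetOnBlockAddedToDAGHandler(func(block *externalapi.DomainBlock,
		virtualChangeSet *externalapi.VirtualChangeSet) error {
		// e.g. fire a BlockAdded notification for the new block
		return nil
	})
}
```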


@@ -72,10 +72,3 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
 	}
 	return peers
 }
-// HasPeers returns whether there are currently active peers
-func (f *FlowContext) HasPeers() bool {
-	f.peersMutex.RLock()
-	defer f.peersMutex.RUnlock()
-	return len(f.peers) > 0
-}


@@ -15,6 +15,12 @@ import (
 // on: 2^orphanResolutionRange * PHANTOM K.
 const maxOrphans = 600
+// UnorphaningResult is the result of unorphaning a block
+type UnorphaningResult struct {
+	block            *externalapi.DomainBlock
+	virtualChangeSet *externalapi.VirtualChangeSet
+}
 // AddOrphan adds the block to the orphan set
 func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
 	f.orphansMutex.Lock()
@@ -51,7 +57,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
 }
 // UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
-func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
+func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
 	f.orphansMutex.Lock()
 	defer f.orphansMutex.Unlock()
@@ -60,7 +66,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
 	rootBlockHash := consensushashing.BlockHash(rootBlock)
 	processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
-	var unorphanedBlocks []*externalapi.DomainBlock
+	var unorphaningResults []*UnorphaningResult
 	for len(processQueue) > 0 {
 		var orphanHash externalapi.DomainHash
 		orphanHash, processQueue = processQueue[0], processQueue[1:]
@@ -84,18 +90,21 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
 			}
 		}
 		if canBeUnorphaned {
-			unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
+			virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
 			if err != nil {
 				return nil, err
 			}
 			if unorphaningSucceeded {
-				unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
+				unorphaningResults = append(unorphaningResults, &UnorphaningResult{
+					block:            orphanBlock,
+					virtualChangeSet: virtualChangeSet,
+				})
 				processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
 			}
 		}
 	}
-	return unorphanedBlocks, nil
+	return unorphaningResults, nil
 }
 // addChildOrphansToProcessQueue finds all child orphans of `blockHash`
@@ -134,24 +143,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
 	return childOrphans
 }
-func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
+func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
 	orphanBlock, ok := f.orphans[orphanHash]
 	if !ok {
-		return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
+		return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
 	}
 	delete(f.orphans, orphanHash)
-	err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
+	virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
-			return false, nil
+			return nil, false, nil
 		}
-		return false, err
+		return nil, false, err
 	}
 	log.Infof("Unorphaned block %s", orphanHash)
-	return true, nil
+	return virtualChangeSet, true, nil
 }
 // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan


@ -0,0 +1,47 @@
package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}
if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
return false, nil
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}
now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}
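
For context, a sketch of how this check is meant to be used by a mining driver; mineLoop, getBlockTemplate and mine below are illustrative stand-ins, not kaspad APIs (assumes "time" is imported):

// mineLoop gates template-based mining on ShouldMine.
func mineLoop(f *FlowContext, getBlockTemplate func() interface{}, mine func(interface{})) error {
	for {
		shouldMine, err := f.ShouldMine()
		if err != nil {
			return err
		}
		if !shouldMine {
			// Disconnected, mid-IBD, or stale tip: a template built now
			// would likely extend an outdated DAG, so back off and re-check.
			time.Sleep(time.Second)
			continue
		}
		mine(getBlockTemplate())
	}
}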

View File

@ -18,7 +18,7 @@ var (
// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
- minAcceptableProtocolVersion = uint32(5)
+ minAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(5)
)
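
Relative to master, this side of the comparison lowers the acceptance floor to p2p version 4 while keeping the ceiling at 5, so handshakes from both v4 and v5 peers pass. A minimal sketch of the range check these two constants drive (the function name is illustrative, not the actual handshake code):

// isProtocolVersionAcceptable checks a peer's advertised version against
// the inclusive [min, max] acceptance window.
func isProtocolVersionAcceptable(peerVersion uint32) bool {
	return peerVersion >= minAcceptableProtocolVersion &&
		peerVersion <= maxAcceptableProtocolVersion
}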

View File

@ -0,0 +1,39 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// ReceiveAddresses asks a peer for more addresses if needed.
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
subnetworkID := peer.SubnetworkID()
msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
err := outgoingRoute.Enqueue(msgGetAddresses)
if err != nil {
return err
}
message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
}

View File

@ -0,0 +1,52 @@
package addressexchange
import (
"math/rand"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
type SendAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))
err = outgoingRoute.Enqueue(msgAddresses)
if err != nil {
return err
}
}
}
// shuffleAddresses shuffles and truncates the given addresses if there are more than the maximum allowed in one message.
func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
addressCount := len(addresses)
if addressCount < appmessage.MaxAddressesPerMsg {
return addresses
}
shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
copy(shuffleAddresses, addresses)
rand.Shuffle(addressCount, func(i, j int) {
shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
})
// Truncate it to the maximum size.
shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
return shuffleAddresses
}
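
Two details are worth noting here: the slice is copied before shuffling, so the address manager's backing slice is never reordered in place, and truncation happens only after the shuffle, giving every address an equal chance of being included. A standalone illustration of the copy-then-shuffle step (standard library only):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	original := []int{1, 2, 3, 4, 5}
	shuffled := make([]int, len(original))
	copy(shuffled, original) // shuffle a copy, never the caller's slice
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	fmt.Println(original) // always [1 2 3 4 5]
	fmt.Println(shuffled) // a random permutation of it
}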

View File

@ -0,0 +1,16 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be the same on both syncer and syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages`, to avoid confusion and human error.
// Nonetheless, we must enforce that it does not exceed `router.DefaultMaxMessages`
if ibdBatchSize > router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than or equal to router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}

View File

@ -0,0 +1,33 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
}
}
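
receiveBlockLocator shows a pattern these flows rely on (readMsgBlock in the relay flow does the same): while waiting for one specific message type on a shared route, block invs that arrive in the meantime are buffered in invsQueue rather than dropped, and any other type is treated as a protocol violation. The same pattern stripped of kaspad types (all names below are illustrative):

package flows

import "fmt"

type inv struct{ hash string }
type locator struct{ hashes []string }

// awaitLocator sketches the buffer-while-waiting pattern: queue unrelated
// invs for later, return on the wanted type, fail on anything else.
func awaitLocator(dequeue func() (interface{}, error), invsQueue *[]inv) (*locator, error) {
	for {
		message, err := dequeue()
		if err != nil {
			return nil, err
		}
		switch m := message.(type) {
		case inv:
			*invsQueue = append(*invsQueue, m) // processed by the main loop later
		case *locator:
			return m, nil
		default:
			return nil, fmt.Errorf("unexpected message type %T", m)
		}
	}
}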

View File

@ -0,0 +1,86 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
type HandleIBDBlockLocatorContext interface {
Domain() domain.Domain
}
// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
// the highest known block that's in the selected parent chain of `targetHash` to the
// requesting peer.
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)
targetHash := ibdBlockLocatorMessage.TargetHash
log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
if err != nil {
return err
}
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}
foundHighestHashInTheSelectedParentChainOfTargetHash := false
for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return err
}
// The IBD block locator checks only existing blocks with bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue
}
isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
if err != nil {
return err
}
if !isBlockLocatorHashInSelectedParentChainOfHighHash {
continue
}
foundHighestHashInTheSelectedParentChainOfTargetHash = true
log.Debugf("Found a known hash %s amongst peer %s's "+
"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)
ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
if err != nil {
return err
}
break
}
if !foundHighestHashInTheSelectedParentChainOfTargetHash {
log.Warnf("no hash was found in the blockLocator "+
"that was in the selected parent chain of targetHash %s", targetHash)
ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
if err != nil {
return err
}
}
}
}

View File

@ -0,0 +1,54 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
type HandleIBDBlockRequestsContext interface {
Domain() domain.Domain
}
// HandleIBDBlockRequests listens to appmessage.MsgRequestIBDBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block)
ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
err = outgoingRoute.Enqueue(ibdBlockMessage)
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
}
}
}

View File

@ -0,0 +1,145 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
Config() *config.Config
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
for _, blockHash := range pointAndItsAnticone {
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
if err != nil {
return err
}
trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
for i, daaBlockHash := range blockDAAWindowHashes {
index, exists := daaWindowHashesToIndex[*daaBlockHash]
if !exists {
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
if err != nil {
return err
}
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
index = len(daaWindowBlocks) - 1
daaWindowHashesToIndex[*daaBlockHash] = index
}
trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
}
ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
if err != nil {
return err
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
if !exists {
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
if err != nil {
return err
}
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
Hash: ghostdagDataBlockHash,
GHOSTDAGData: data,
})
index = len(ghostdagData) - 1
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
}
}
err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
if err != nil {
return err
}
for _, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
}
}
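
Worth calling out in the loop above: DAA-window and GHOSTDAG entries shared by many anticone blocks are sent only once. Each unique hash is interned into a flat slice, and the per-block trusted data carries just indexes into that slice, which can shrink the message considerably. The interning step in isolation (illustrative names):

// internIndex returns the pool index of value, appending it on first sight.
// This mirrors the hash-to-index maps used above to deduplicate shared
// DAA-window and GHOSTDAG entries before they go over the wire.
func internIndex(value string, pool *[]string, seen map[string]int) int {
	if index, exists := seen[value]; exists {
		return index
	}
	*pool = append(*pool, value)
	index := len(*pool) - 1
	seen[value] = index
	return index
}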

View File

@ -0,0 +1,40 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
Domain() domain.Domain
}
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point proof from %s", peer)
pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
if err != nil {
return err
}
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
err = outgoingRoute.Enqueue(pruningPointProofMessage)
if err != nil {
return err
}
log.Debugf("Sent pruning point proof to %s", peer)
}
}

View File

@ -0,0 +1,53 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
Domain() domain.Domain
}
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
if err != nil {
return err
}
log.Debugf("Relayed block with hash %s", hash)
}
}
}

View File

@ -0,0 +1,416 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// orphanResolutionRange is the maximum number of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details
var orphanResolutionRange uint32 = 5
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
IsRecoverableError(err error) bool
}
type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []*appmessage.MsgInvRelayBlock
}
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleRelayInvsFlow{
RelayInvsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
close(peer.IBDRequestChannel())
return err
}
func (flow *handleRelayInvsFlow) start() error {
for {
log.Debugf("Waiting for inv")
inv, err := flow.readInv()
if err != nil {
return err
}
log.Debugf("Got relay inv for block %s", inv.Hash)
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
if err != nil {
return err
}
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
if blockInfo.BlockStatus == externalapi.StatusInvalid {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
log.Debugf("Block %s already exists. continuing...", inv.Hash)
continue
}
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if flow.IsOrphan(inv.Hash) {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", inv.Hash)
continue
}
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil {
return err
}
continue
}
// Block relay is disabled during IBD
if flow.IsIBDRunning() {
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
}
log.Debugf("Requesting block %s", inv.Hash)
block, exists, err := flow.requestBlock(inv.Hash)
if err != nil {
return err
}
if exists {
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
continue
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
continue
}
log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
continue
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Infof("Ignoring duplicate block %s", inv.Hash)
continue
}
return err
}
if len(missingParents) > 0 {
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
err := flow.processOrphan(block)
if err != nil {
return err
}
continue
}
oldVirtualParents := hashset.New()
for _, parent := range oldVirtualInfo.ParentHashes {
oldVirtualParents.Add(parent)
}
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}
block, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
}
}
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
if exists {
return nil, true, nil
}
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().Remove(requestHash)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
if err != nil {
return nil, false, err
}
msgBlock, err := flow.readMsgBlock()
if err != nil {
return nil, false, err
}
block := appmessage.MsgBlockToDomainBlock(msgBlock)
blockHash := consensushashing.BlockHash(block)
if !blockHash.Equal(requestHash) {
return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
}
return block, false, nil
}
// readMsgBlock returns the next msgBlock from the incoming route, and populates invsQueue with any inv messages that arrive meanwhile.
//
// Note: this function assumes the incoming route can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock:
return message, nil
default:
return nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
blockHash := consensushashing.BlockHash(block)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
}
missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil, nil
}
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, virtualChangeSet, nil
}
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)
// Return if the block has been orphaned from elsewhere already
if flow.IsOrphan(blockHash) {
log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
return nil
}
// Add the block to the orphan set if it's within orphan resolution range
isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
if err != nil {
return err
}
if isBlockInOrphanResolutionRange {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", blockHash)
return nil
}
}
log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set", blockHash)
flow.AddOrphan(block)
log.Debugf("Requesting block %s missing ancestors", blockHash)
return flow.AddOrphanRootsToQueue(blockHash)
}
// Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash)
// Send the block to IBD flow via the IBDRequestChannel.
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
select {
case flow.peer.IBDRequestChannel() <- block:
default:
}
return nil
}
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
parents := block.Header.DirectParents()
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
}
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
if err != nil {
return false, err
}
blockLocatorHashes, err := flow.receiveBlockLocator()
if err != nil {
return false, err
}
for _, blockLocatorHash := range blockLocatorHashes {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return false, err
}
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
return true, nil
}
}
return false, nil
}
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
if err != nil {
return err
}
if !orphanExists {
log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}
if len(orphanRoots) == 0 {
// In some rare cases we get here when there are no orphan roots left to request
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = appmessage.NewMsgInvBlock(root)
}
flow.invsQueue = append(invMessages, flow.invsQueue...)
return nil
}
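
Taken together, processOrphan and isBlockInOrphanResolutionRange implement a single decision: if any of the (at most orphanResolutionRange) locator hashes returned by the peer is known locally with its body, the missing ancestors are close enough to fetch through the unorphaning mechanism; if none is, the peer's chain is too far ahead and IBD is requested instead. The rule reduced to a sketch (names illustrative):

// decideOrphanHandling condenses the unorphan-vs-IBD decision. knownWithBody
// stands in for a GetBlockInfo lookup that also rules out header-only blocks.
func decideOrphanHandling(locatorHashes []string, knownWithBody func(string) bool) string {
	for _, hash := range locatorHashes {
		if knownWithBody(hash) {
			return "unorphan" // small gap: request missing ancestors one by one
		}
	}
	return "ibd" // no recent shared history: fall back to a full sync
}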

View File

@ -0,0 +1,75 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
Domain() domain.Domain
}
type handleRequestBlockLocatorFlow struct {
RequestBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestBlockLocatorFlow{
RequestBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
highHash, limit, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
if err != nil || len(locator) == 0 {
if err != nil {
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
}
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between the pruning point and %s", highHash)
}
err = flow.sendBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, 0, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
if err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,107 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// This constant must be the same on both syncer and syncee. Therefore, never (!!) change it unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 100
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
Domain() domain.Domain
}
type handleRequestHeadersFlow struct {
RequestHeadersContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestHeadersFlow{
RequestHeadersContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestHeadersFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}
log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
}
// The next lowHash is the last element in blockHashes
lowHash = blockHashes[len(blockHashes)-1]
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}
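
The maxBlocks cap, together with re-assigning lowHash to the last returned hash, turns header serving into cursor-based pagination over the selected chain, so no single GetHashesBetween call holds the consensus lock for long. The shape of that loop in isolation (illustrative names):

// paginate sketches the cursor loop above: fetch at most pageSize items past
// the cursor, hand them off, then advance the cursor to the last item served.
func paginate(cursor, target string, fetch func(from, to string, limit int) []string, serve func([]string)) {
	const pageSize = 1 << 10 // mirrors maxBlocks above
	for cursor != target {
		page := fetch(cursor, target, pageSize)
		if len(page) == 0 {
			return
		}
		serve(page)
		cursor = page[len(page)-1] // the next fetch resumes from here
	}
}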

View File

@ -0,0 +1,140 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
Domain() domain.Domain
}
type handleRequestPruningPointUTXOSetFlow struct {
HandleRequestPruningPointUTXOSetContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestPruningPointUTXOSetFlow{
HandleRequestPruningPointUTXOSetContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
for {
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
if err != nil {
return err
}
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
if err != nil {
return err
}
}
}
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
defer onEnd()
log.Debugf("Got request for pruning point UTXO set")
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
*appmessage.MsgRequestPruningPointUTXOSet, error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
}
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
// Propagate any other error instead of continuing with a nil UTXO slice.
return err
}
log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}
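
The sender streams the UTXO set in step-sized chunks but pauses every ibdBatchSize chunks until the syncee explicitly requests the next batch, bounding how much unacknowledged data can pile up in the route. The windowed handshake in isolation (illustrative names):

// sendChunked sketches the batch/ack flow control used above: after every
// batchSize chunks, block until the receiver asks for more.
func sendChunked(chunks [][]byte, batchSize int, send func([]byte) error, awaitNextBatchRequest func() error) error {
	for i, chunk := range chunks {
		if err := send(chunk); err != nil {
			return err
		}
		if (i+1)%batchSize == 0 {
			if err := awaitNextBatchRequest(); err != nil {
				return err
			}
		}
	}
	return nil // the caller sends its "done" message after this returns
}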

View File

@ -0,0 +1,645 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
"time"
)
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}
type handleIBDFlow struct {
IBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
IBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
// Wait for IBD requests triggered by other flows
block, ok := <-flow.peer.IBDRequestChannel()
if !ok {
return nil
}
err := flow.runIBDIfNotRunning(block)
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
highHash := consensushashing.BlockHash(block)
// Temp code to avoid IBD being triggered by lagging nodes publishing their side-chain. This patch
// is applied only to p2p v4, since the IBD negotiation implemented there has quadratic complexity in this worst case.
// See the IBD logic of p2p v5 for further details.
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err == nil {
virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
if err == nil {
// We first check that DAA score of the relay block is at distance of more than DAA window size.
// This indicates a side-chain which is not in the future of any block in the current virtual DAA window.
if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
// We then find the 'unit' of current virtual difficulty. We check if the relay block is at least
// at distance of 180 such units. This signals another condition for a pow-weak side-chain.
virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
var virtualSub, difficultyMul big.Int
if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
log.Criticalf("Avoiding IBD triggered by relay %s with %d DAA score diff and lower blue work (%d, %d)",
highHash,
virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
return nil
}
}
}
}
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
isFinishedSuccessfully := false
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
}()
log.Infof("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Infof("Syncing blocks up to %s", highHash)
log.Infof("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Infof("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {
return err
}
if !shouldSync {
return nil
}
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
if err != nil {
return err
}
} else {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", highHash)
return nil
}
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
if err != nil {
return err
}
}
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}
log.Debugf("Finished syncing blocks up to %s", highHash)
isFinishedSuccessfully = true
return nil
}
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = "(interrupted)"
}
log.Infof("IBD finished %s", successString)
}
// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, false, err
}
for {
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, false, err
}
if !highestHashFound {
return nil, false, nil
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, false, err
}
if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {
return highestHash, true, nil
}
locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, false, err
}
}
}
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}
return blockLocator, nil
}
func (flow *handleIBDFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
return highestHashIndex, nil
}
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, false, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlockLocatorHighestHash:
highestHash := message.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
return highestHash, true, nil
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
log.Debugf("Peer %s does not know any block within our blockLocator. "+
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
return nil, false, nil
default:
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
}
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
log.Infof("Downloading headers from %s", flow.peer)
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
// Keep a short queue of BlockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
errChan <- err
return
}
if doneIBD {
close(blockHeadersMessageChan)
return
}
blockHeadersMessageChan <- blockHeadersMessage
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
if err != nil {
errChan <- err
return
}
}
})
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
if !ok {
// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"highHash block %s from peer %s during block download", highHash, flow.peer)
}
return nil
}
for _, header := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
case err := <-errChan:
return err
}
}
}
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
peerSelectedTipHash *externalapi.DomainHash) error {
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.BlockHeadersMessage:
return message, false, nil
case *appmessage.MsgDoneHeaders:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdBlockHeaders,
appmessage.CmdDoneHeaders,
message.Command())
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
Transactions: nil,
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
if err != nil {
return err
}
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
if err != nil {
return err
}
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return err
}
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
if err != nil {
return err
}
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
"tip is smaller than the current selected tip")
}
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
return protocolerrors.Errorf(false, "difference between the timestamps of "+
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
}
return nil
}
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
if err != nil {
return false, err
}
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
)
}
}
}
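// Editorial sketch (not part of kaspad): the flow-control idea used above in
// miniature. The receiver acknowledges every batchSize chunks so the sender can
// keep streaming without unbounded buffering; chunks and ack are hypothetical
// stand-ins for the UTXO set chunk and request-next-chunk messages.
func receiveChunkedSketch(chunks <-chan []int, ack chan<- struct{}, batchSize int) (totalItems int) {
	receivedChunks := 0
	for chunk := range chunks {
		totalItems += len(chunk)
		receivedChunks++
		if receivedChunks%batchSize == 0 {
			// Mirrors sending MsgRequestNextPruningPointUTXOSetChunk
			ack <- struct{}{}
		}
	}
	return totalItems
}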
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
}
if len(hashes) == 0 {
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
// In these cases - GetMissingBlockBodyHashes would return an empty array.
log.Debugf("No missing block body hashes found.")
return nil
}
lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
if err != nil {
return err
}
highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore()
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
hashesToRequest = hashes[offset : offset+ibdBatchSize]
} else {
hashesToRequest = hashes[offset:]
}
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
if err != nil {
return err
}
for _, expectedHash := range hashesToRequest {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
blockHash := consensushashing.BlockHash(block)
if !expectedHash.Equal(blockHash) {
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
continue
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
highestProcessedDAAScore = block.Header.DAAScore()
}
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
}
return flow.resolveVirtual(highestProcessedDAAScore)
}
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int
// DAA scores are uint64, so compare before subtracting to avoid underflow
if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
if err != nil {
return err
}
err = flow.OnVirtualChange(virtualChangeSet)
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}
}
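// Editorial sketch isolating the underflow-safe progress estimate used in
// resolveVirtual above: DAA scores are uint64, so the bounds are compared
// before subtracting. For example, start=1000, target=5000, current=2000
// yields 25.
func estimateProgressPercentSketch(start, target, current uint64) int {
	if target <= start {
		return 100
	}
	if current <= start {
		return 0
	}
	return int(float64(current-start) / float64(target-start) * 100)
}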

View File

@ -0,0 +1,45 @@
package blockrelay
type ibdProgressReporter struct {
lowDAAScore uint64
highDAAScore uint64
objectName string
totalDAAScoreDifference uint64
lastReportedProgressPercent int
processed int
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero or negative diff
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
objectName: objectName,
totalDAAScoreDifference: highDAAScore - lowDAAScore,
lastReportedProgressPercent: 0,
processed: 0,
}
}
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid a negative diff
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
ipr.lastReportedProgressPercent = progressPercent
}
}
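// Editorial usage sketch (hypothetical driver, not part of kaspad): feeding the
// reporter one batch at a time with the DAA score of the last item in each
// batch, the same way the IBD flows above drive it.
func reportHeadersSketch(daaScores []uint64) {
	if len(daaScores) == 0 {
		return
	}
	reporter := newIBDProgressReporter(daaScores[0], daaScores[len(daaScores)-1], "headers")
	for offset := 0; offset < len(daaScores); offset += ibdBatchSize {
		end := offset + ibdBatchSize
		if end > len(daaScores) {
			end = len(daaScores)
		}
		reporter.reportProgress(end-offset, daaScores[end-1])
	}
}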

View File

@ -0,0 +1,394 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
"time"
)
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensus()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
}
return err
}
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
if !highestSharedBlockFound {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
if err != nil {
return false, false, err
}
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
return true, true, nil
}
return false, false, nil
}
return false, true, nil
}
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return false, err
}
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil {
return false, err
}
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil
}
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
log.Infof("Downloading the pruning point proof from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
if err != nil {
return nil, err
}
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
}
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
}
return nil, err
}
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
if err != nil {
return nil, err
}
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
if err != nil {
return err
}
err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
if err != nil {
return err
}
// TODO: Remove this condition once there's a more robust way to check
// finality violation in the headers proof.
if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
if err != nil {
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.Exists {
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
}
err = flow.validatePruningPointFutureHeaderTimestamps()
if err != nil {
return err
}
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
return nil
}
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
if err != nil {
return err
}
err = flow.validateAndInsertPruningPoints(proofPruningPoint)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
}
pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
}
if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
if err != nil {
return err
}
for {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
break
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
if err != nil {
return err
}
}
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
return nil
}
func (flow *handleIBDFlow) processBlockWithTrustedData(
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
blockWithTrustedData := &externalapi.BlockWithTrustedData{
Block: appmessage.MsgBlockToDomainBlock(block.Block),
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
}
for _, index := range block.DAAWindowIndices {
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
}
for _, index := range block.GHOSTDAGDataIndices {
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
return err
}
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch downCastedMessage := message.(type) {
case *appmessage.MsgBlockWithTrustedDataV4:
return downCastedMessage, false, nil
case *appmessage.MsgDoneBlocksWithTrustedData:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
(&appmessage.MsgBlockWithTrustedData{}).Command(),
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
downCastedMessage.Command())
}
}
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
}
return msgPruningPoints, nil
}
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
if currentPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
}
pruningPoints, err := flow.receivePruningPoints()
if err != nil {
return err
}
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
for i, header := range pruningPoints.Headers {
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
}
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
if err != nil {
return err
}
if arePruningPointsViolatingFinality {
// TODO: Find a better way to deal with finality conflicts.
return protocolerrors.Errorf(false, "pruning points are violating finality")
}
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
if !lastPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
"point in the list")
}
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
pruningPoint *externalapi.DomainHash) (bool, error) {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
if err != nil {
return false, err
}
if !isValid {
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
}
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
return false, err
}
if !isSuccessful {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
return true, nil
}
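// Editorial sketch of the staging pattern that ibdWithHeadersProof relies on:
// all work happens against a disposable staging consensus, and only a fully
// successful download is committed; a recoverable failure deletes the staging
// area and leaves the active consensus untouched. The stagerSketch interface
// is hypothetical.
type stagerSketch interface {
	InitStaging() error
	CommitStaging() error
	DeleteStaging() error
}

func stageAndCommitSketch(s stagerSketch, download func() error) error {
	if err := s.InitStaging(); err != nil {
		return err
	}
	if err := download(); err != nil {
		// Clean up the staging area; the real flow does this only for recoverable errors
		if deleteErr := s.DeleteStaging(); deleteErr != nil {
			return deleteErr
		}
		return err
	}
	return s.CommitStaging()
}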

View File

@ -0,0 +1,9 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
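// Editorial usage sketch: spawn wraps `go` so that a panic inside the goroutine
// is reported through the PROT subsystem logger instead of being lost.
func spawnUsageSketch() {
	spawn("exampleWorker", func() {
		log.Infof("running inside a panic-reporting goroutine")
	})
}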

View File

@ -0,0 +1,35 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
Domain() domain.Domain
Config() *config.Config
}
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return err
}
if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
return nil
}
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
return outgoingRoute.Enqueue(virtualSelectedParentInv)
}

View File

@ -0,0 +1,42 @@
package ping
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
type ReceivePingsContext interface {
}
type receivePingsFlow struct {
ReceivePingsContext
incomingRoute, outgoingRoute *router.Route
}
// ReceivePings handles all ping messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgPing.
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &receivePingsFlow{
ReceivePingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *receivePingsFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
pingMessage := message.(*appmessage.MsgPing)
pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
err = flow.outgoingRoute.Enqueue(pongMessage)
if err != nil {
return err
}
}
}

View File

@ -0,0 +1,77 @@
package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/random"
)
// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
ShutdownChan() <-chan struct{}
}
type sendPingsFlow struct {
SendPingsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendPings starts sending MsgPings every pingInterval to the
// given peer.
// This function assumes that incomingRoute will only return MsgPong.
func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendPingsFlow{
SendPingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *sendPingsFlow) start() error {
const pingInterval = 2 * time.Minute
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()
for {
select {
case <-flow.ShutdownChan():
return nil
case <-ticker.C:
}
nonce, err := random.Uint64()
if err != nil {
return err
}
flow.peer.SetPingPending(nonce)
pingMessage := appmessage.NewMsgPing(nonce)
err = flow.outgoingRoute.Enqueue(pingMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
if errors.Is(err, router.ErrTimeout) {
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
}
return err
}
pongMessage := message.(*appmessage.MsgPong)
if pongMessage.Nonce != pingMessage.Nonce {
return protocolerrors.New(true, "nonce mismatch between ping and pong")
}
flow.peer.SetPingIdle()
}
}
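// Editorial, self-contained sketch of the nonce check above: a pong is valid
// only if it echoes the nonce of the outstanding ping. The channels are
// hypothetical stand-ins for the peer connection.
func pingNonceSketch(pings chan<- uint64, pongs <-chan uint64, nonce uint64) error {
	pings <- nonce
	if got := <-pongs; got != nonce {
		return protocolerrors.New(true, "nonce mismatch between ping and pong")
	}
	return nil
}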

View File

@ -0,0 +1,194 @@
package v4
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type protocolManager interface {
RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
Context() *flowcontext.FlowContext
}
// Register registers all the protocol flows to the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
flows = registerAddressFlows(m, router, isStopping, errChan)
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
return flows
}
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
}),
m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
appmessage.CmdPruningPointProof,
appmessage.CmdTrustedData,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBD(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandlePruningPointProofRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("HandleRejects", router,
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}

View File

@ -0,0 +1,37 @@
package rejects
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRejectsContext is the interface for the context needed for the HandleRejects flow.
type HandleRejectsContext interface {
}
type handleRejectsFlow struct {
HandleRejectsContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRejects handles all reject messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgReject.
func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRejectsFlow{
HandleRejectsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRejectsFlow) start() error {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
rejectMessage := message.(*appmessage.MsgReject)
return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
}

View File

@ -0,0 +1,24 @@
package testing
import (
"strings"
"testing"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/pkg/errors"
)
func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
pErr := protocolerrors.ProtocolError{}
if errors.As(err, &pErr) != isProtocolError {
t.Fatalf("Unexepcted error %+v", err)
}
if pErr.ShouldBan != shouldBan {
t.Fatalf("Exepcted shouldBan %t but got %t", shouldBan, pErr.ShouldBan)
}
if !strings.Contains(err.Error(), contains) {
t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err)
}
}
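// Editorial usage sketch (not a registered test): asserting that a flow failed
// with a banning protocol error whose message contains a given substring.
func checkFlowErrorUsageSketch(t *testing.T) {
	err := protocolerrors.Errorf(true, "address count exceeded")
	checkFlowError(t, err, true, true, "address count exceeded")
}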

View File

@ -0,0 +1,51 @@
package testing
import (
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"testing"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type fakeReceiveAddressesContext struct{}
func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager {
return nil
}
func TestReceiveAddressesErrors(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
peer := peerpkg.New(nil)
errChan := make(chan error)
go func() {
errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer)
}()
_, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
// Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban
err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress,
addressmanager.GetAddressesMax+1)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
select {
case err := <-errChan:
checkFlowError(t, err, true, true, "address count exceeded")
case <-time.After(time.Second):
t.Fatalf("timed out after %s", time.Second)
}
})
}

View File

@ -0,0 +1,4 @@
package testing
// Because of a bug in Go, coverage fails for packages that contain only test files. See https://github.com/golang/go/issues/27333
// So this is a dummy non-test go file in the package.

View File

@ -0,0 +1,209 @@
package transactionrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// TransactionsRelayContext is the interface for the context needed for the
// HandleRelayedTransactions and HandleRequestedTransactions flows.
type TransactionsRelayContext interface {
NetAdapter() *netadapter.NetAdapter
Domain() domain.Domain
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
invsQueue []*appmessage.MsgInvTransaction
}
// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they
// are missing, adds them to the mempool and propagates them to the rest of the network.
func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRelayedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
invsQueue: make([]*appmessage.MsgInvTransaction, 0),
}
return flow.start()
}
func (flow *handleRelayedTransactionsFlow) start() error {
for {
inv, err := flow.readInv()
if err != nil {
return err
}
if flow.IsIBDRunning() {
continue
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err
}
err = flow.receiveTransactions(requestedIDs)
if err != nil {
return err
}
}
}
func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
inv *appmessage.MsgInvTransaction) (requestedIDs []*externalapi.DomainTransactionID, err error) {
idsToRequest := make([]*externalapi.DomainTransactionID, 0, len(inv.TxIDs))
for _, txID := range inv.TxIDs {
if flow.isKnownTransaction(txID) {
continue
}
exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
if exists {
continue
}
idsToRequest = append(idsToRequest, txID)
}
if len(idsToRequest) == 0 {
return idsToRequest, nil
}
msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
err = flow.outgoingRoute.Enqueue(msgGetTransactions)
if err != nil {
flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
return nil, err
}
return idsToRequest, nil
}
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
return true
}
return false
}
func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvTransaction
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvTransaction)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay flow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error {
return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs)
}
// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute,
// returning only one of the message types at a time, and populates invsQueue with
// any inv messages that arrive in the meantime.
func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvTransaction:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgTx:
return message, nil, nil
case *appmessage.MsgTransactionNotFound:
return nil, message, nil
default:
return nil, nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
// cleared of any pending transactions.
defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
for _, expectedID := range requestedTransactions {
msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
if err != nil {
return err
}
if msgTxNotFound != nil {
if !msgTxNotFound.ID.Equal(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, msgTxNotFound.ID)
}
continue
}
tx := appmessage.MsgTxToDomainTransaction(msgTx)
txID := consensushashing.TransactionID(tx)
if !txID.Equal(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, txID)
}
acceptedTransactions, err :=
flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true)
if err != nil {
ruleErr := &mempool.RuleError{}
if !errors.As(err, ruleErr) {
return errors.Wrapf(err, "failed to process transaction %s", txID)
}
shouldBan := false
if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
if txRuleErr.RejectCode == mempool.RejectInvalid {
shouldBan = true
}
}
if !shouldBan {
continue
}
return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
}
err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions))
if err != nil {
return err
}
flow.OnTransactionAddedToMempool()
}
return nil
}
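// Editorial sketch (assumes "sync" is imported) of the dedup-set contract used
// by SharedRequestedTransactions above: AddIfNotExists reports whether an ID is
// already pending, so each transaction is requested from only one peer, and
// RemoveMany clears pending entries when the flow exits early. Keyed by string
// here for brevity.
type requestedSetSketch struct {
	mu      sync.Mutex
	pending map[string]struct{}
}

func (s *requestedSetSketch) addIfNotExists(id string) (exists bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.pending[id]; ok {
		return true
	}
	s.pending[id] = struct{}{}
	return false
}

func (s *requestedSetSketch) removeMany(ids []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, id := range ids {
		delete(s.pending, id)
	}
}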

View File

@ -0,0 +1,196 @@
package transactionrelay_test
import (
"errors"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"strings"
"testing"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type mocTransactionsRelayContext struct {
netAdapter *netadapter.NetAdapter
domain domain.Domain
sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
return m.netAdapter
}
func (m *mocTransactionsRelayContext) Domain() domain.Domain {
return m.domain
}
func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
return m.sharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
return nil
}
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
defer incomingRoute.Close()
peerIncomingRoute := router.NewRoute("outgoing")
defer peerIncomingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
invMessage := appmessage.NewMsgInvTransaction(txIDs)
err = incomingRoute.Enqueue(invMessage)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsRequest", func() {
msg, err := peerIncomingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %v", err)
}
inv := msg.(*appmessage.MsgRequestTransactions)
if len(txIDs) != len(inv.IDs) {
t.Fatalf("TestHandleRelayedTransactions: expected %d transactions ID, but got %d", len(txIDs), len(inv.IDs))
}
for i, id := range inv.IDs {
if txIDs[i].String() != id.String() {
t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
}
err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i]))
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
}
// Insert an unexpected message type to stop the infinite loop.
err = incomingRoute.Enqueue(&appmessage.MsgAddresses{})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
})
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute)
// Since we inserted an unexpected message type to stop the infinite loop,
// we expect the error to stem from this specific message and to
// count as a protocol error.
if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) {
t.Fatalf("Expected to protocol error")
} else {
if !protocolErr.ShouldBan {
t.Fatalf("Exepcted shouldBan true, but got false.")
}
if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") {
t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+
"in the block relay flow, but got: %v", protocolErr.Cause)
}
}
})
}
// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when
// trying to dequeue a message from a closed route.
func TestOnClosedIncomingRoute(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to creat a NetAdapter : %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txIDs := []*externalapi.DomainTransactionID{txID}
err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
incomingRoute.Close()
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute)
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}

View File

@ -0,0 +1,59 @@
package transactionrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type handleRequestedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested
// transactions if those are in the mempool.
// Missing transactions are ignored.
func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestedTransactionsFlow) start() error {
for {
msgRequestTransactions, err := flow.readRequestTransactions()
if err != nil {
return err
}
for _, transactionID := range msgRequestTransactions.IDs {
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
err := flow.outgoingRoute.Enqueue(msgTransactionNotFound)
if err != nil {
return err
}
continue
}
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
if err != nil {
return err
}
}
}
}
func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) {
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
return msg.(*appmessage.MsgRequestTransactions), nil
}

View File

@ -0,0 +1,91 @@
package transactionrelay_test
import (
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"testing"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions
// when the requested transactions are not found in the mempool.
func TestHandleRequestedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test Consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain Instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
msg := appmessage.NewMsgRequestTransactions(txIDs)
err = incomingRoute.Enqueue(msg)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsMessages", func() {
for i := range txIDs {
msg, err := outgoingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %s", err)
}
outMsg := msg.(*appmessage.MsgTransactionNotFound)
if txIDs[i].String() != outMsg.ID.String() {
t.Fatalf("TestHandleRequestedTransactionsNotFound: expected equal txID: expected %s, but got %s", txIDs[i].String(), outMsg.ID.String())
}
}
// Close the incomingRoute to stop the infinite loop.
incomingRoute.Close()
})
err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute)
// Make sure the error is due to the closed route.
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}

View File

@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
-flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
+flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:

View File

@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer" "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
) )
@ -33,7 +34,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil { if err != nil {
return err return err
} }
if !blockInfo.HasHeader() { if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+ return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash) "with an unknown targetHash %s", targetHash)
} }
@ -46,7 +47,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
} }
// The IBD block locator is checking only existing blocks with bodies. // The IBD block locator is checking only existing blocks with bodies.
if !blockInfo.HasBody() { if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue continue
} }
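On the master side these explicit checks are folded into BlockInfo helper methods. A sketch of what HasHeader and HasBody plausibly look like, derived only from the two conditions visible in this hunk (an illustration, not the kaspad source):

// Derived from the `!blockInfo.Exists` check above: a header is known iff the block exists.
func (bi *BlockInfo) HasHeader() bool {
	return bi.Exists
}

// Derived from `!blockInfo.Exists || blockInfo.BlockStatus == StatusHeaderOnly`:
// a body is present only for existing blocks that are not header-only.
func (bi *BlockInfo) HasBody() bool {
	return bi.Exists && bi.BlockStatus != StatusHeaderOnly
}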

View File

@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -27,15 +28,18 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes)) log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes { for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database. // Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash) blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil { if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
} }
if !found {
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed // TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block) blockMessage := appmessage.DomainBlockToMsgBlock(block)

View File

@ -119,15 +119,11 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
} }
for i, blockHash := range pointAndItsAnticone { for i, blockHash := range pointAndItsAnticone {
block, found, err := context.Domain().Consensus().GetBlock(blockHash) block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil { if err != nil {
return err return err
} }
if !found {
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash])) err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil { if err != nil {
return err return err

View File

@ -5,6 +5,7 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -28,15 +29,18 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes) log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes { for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database. // Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash) blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil { if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
} }
if !found {
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed // TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block)) err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))

View File

@ -7,7 +7,6 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors" "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@ -26,8 +25,8 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface { type RelayInvsContext interface {
Domain() domain.Domain Domain() domain.Domain
Config() *config.Config Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnNewBlockTemplate() error OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error Broadcast(message appmessage.Message) error
@ -36,19 +35,13 @@ type RelayInvsContext interface {
IsOrphan(blockHash *externalapi.DomainHash) bool IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool IsIBDRunning() bool
IsRecoverableError(err error) bool IsRecoverableError(err error) bool
IsNearlySynced() (bool, error)
}
type invRelayBlock struct {
Hash *externalapi.DomainHash
IsOrphanRoot bool
} }
type handleRelayInvsFlow struct { type handleRelayInvsFlow struct {
RelayInvsContext RelayInvsContext
incomingRoute, outgoingRoute *router.Route incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer peer *peerpkg.Peer
invsQueue []invRelayBlock invsQueue []*appmessage.MsgInvRelayBlock
} }
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they // HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
@ -61,7 +54,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
incomingRoute: incomingRoute, incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute, outgoingRoute: outgoingRoute,
peer: peer, peer: peer,
invsQueue: make([]invRelayBlock, 0), invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
} }
err := flow.start() err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now // Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
@ -112,17 +105,11 @@ func (flow *handleRelayInvsFlow) start() error {
continue continue
} }
// Block relay is disabled if the node is already in IBD AND considered out of sync // Block relay is disabled during IBD
if flow.IsIBDRunning() { if flow.IsIBDRunning() {
isNearlySynced, err := flow.IsNearlySynced() log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
if err != nil {
return err
}
if !isNearlySynced {
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
continue continue
} }
}
log.Debugf("Requesting block %s", inv.Hash) log.Debugf("Requesting block %s", inv.Hash)
block, exists, err := flow.requestBlock(inv.Hash) block, exists, err := flow.requestBlock(inv.Hash)
@ -144,36 +131,12 @@ func (flow *handleRelayInvsFlow) start() error {
continue continue
} }
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
// that means the process started by a proper and relevant relay block
if !inv.IsOrphanRoot {
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
if err != nil {
return err
}
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
if err != nil {
return err
}
// Since `BlueWork` respects topology, this condition means that the relay
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
continue
}
}
}
log.Debugf("Processing block %s", inv.Hash) log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo() oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil { if err != nil {
return err return err
} }
missingParents, err := flow.processBlock(block) missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil { if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) { if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash) log.Infof("Ignoring pruned block %s", inv.Hash)
@ -205,20 +168,15 @@ func (flow *handleRelayInvsFlow) start() error {
return err return err
} }
virtualHasNewParents := false
for _, parent := range newVirtualInfo.ParentHashes { for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) { if oldVirtualParents.Contains(parent) {
continue continue
} }
virtualHasNewParents = true
block, found, err := flow.Domain().Consensus().GetBlock(parent) block, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil { if err != nil {
return err return err
} }
if !found {
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
}
blockHash := consensushashing.BlockHash(block) blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash) log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block) err = flow.relayBlock(block)
@ -227,16 +185,8 @@ func (flow *handleRelayInvsFlow) start() error {
} }
} }
if virtualHasNewParents {
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash) log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block) err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil { if err != nil {
return err return err
} }
@ -252,24 +202,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
return nil return nil
} }
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) { func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 { if len(flow.invsQueue) > 0 {
var inv invRelayBlock var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:] inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil return inv, nil
} }
msg, err := flow.incomingRoute.Dequeue() msg, err := flow.incomingRoute.Dequeue()
if err != nil { if err != nil {
return invRelayBlock{}, err return nil, err
} }
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock) inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok { if !ok {
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+ return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command()) "expecting an inv message", msg.Command())
} }
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil return inv, nil
} }
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) { func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
@ -314,7 +264,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
switch message := message.(type) { switch message := message.(type) {
case *appmessage.MsgInvRelayBlock: case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false}) flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock: case *appmessage.MsgBlock:
return message, nil return message, nil
default: default:
@ -323,25 +273,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
} }
} }
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) { func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
blockHash := consensushashing.BlockHash(block) blockHash := consensushashing.BlockHash(block)
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true) virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil { if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) { if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, errors.Wrapf(err, "failed to process block %s", blockHash) return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
} }
missingParentsError := &ruleerrors.ErrMissingParents{} missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) { if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil return missingParentsError.MissingParentHashes, nil, nil
} }
// A duplicate block should not appear to the user as a warning and is already reported in the calling function // A duplicate block should not appear to the user as a warning and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) { if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err) log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
} }
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash) return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
} }
return nil, nil return nil, virtualChangeSet, nil
} }
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error { func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@ -455,10 +405,10 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
} }
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots)) log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]invRelayBlock, len(orphanRoots)) invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots { for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root) log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true} invMessages[i] = appmessage.NewMsgInvBlock(root)
} }
flow.invsQueue = append(invMessages, flow.invsQueue...) flow.invsQueue = append(invMessages, flow.invsQueue...)
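Taken together, the master-side changes in this file form a small queueing discipline: orphan roots are wrapped in invRelayBlock with IsOrphanRoot set and prepended to the pending invs, which is also what exempts them from the blue-work gate above. A sketch using the types from this diff:

func prependOrphanRoots(queue []invRelayBlock, roots []*externalapi.DomainHash) []invRelayBlock {
	invs := make([]invRelayBlock, len(roots))
	for i, root := range roots {
		// Flag the root so the merge-depth heuristic is skipped for it.
		invs[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
	}
	// Orphan roots go to the front of the queue, ahead of regular relay invs.
	return append(invs, queue...)
}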

View File

@ -47,9 +47,9 @@ func (flow *handleRequestAnticoneFlow) start() error {
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip // GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since // intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
// we relay blocks only if they enter virtual's mergeset. We add a factor of 2 for possible sync gaps. // we relay blocks only if they enter virtual's mergeset. We add 2 as a small margin of error.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash, blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit*2) flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
if err != nil { if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone") return protocolerrors.Wrap(true, err, "Failed querying anticone")
} }

View File

@ -44,27 +44,9 @@ func (flow *handleRequestHeadersFlow) start() error {
if err != nil { if err != nil {
return err return err
} }
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash) log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
consensus := flow.Domain().Consensus() isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
if err != nil {
return err
}
if !lowHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
}
highHashInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
}
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
if err != nil { if err != nil {
return err return err
} }
@ -80,7 +62,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// in order to avoid locking the consensus for too long // in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1 // maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10 const maxBlocks = 1 << 10
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks) blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil { if err != nil {
return err return err
} }
@ -88,7 +70,7 @@ func (flow *handleRequestHeadersFlow) start() error {
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes)) blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes { for i, blockHash := range blockHashes {
blockHeader, err := consensus.GetBlockHeader(blockHash) blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,7 +1,6 @@
package blockrelay package blockrelay
import ( import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common" "github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
@ -21,8 +20,8 @@ import (
type IBDContext interface { type IBDContext interface {
Domain() domain.Domain Domain() domain.Domain
Config() *config.Config Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnNewBlockTemplate() error OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
@ -71,17 +70,16 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
} }
isFinishedSuccessfully := false isFinishedSuccessfully := false
var err error
defer func() { defer func() {
flow.UnsetIBDRunning() flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully, err) flow.logIBDFinished(isFinishedSuccessfully)
}() }()
relayBlockHash := consensushashing.BlockHash(block) relayBlockHash := consensushashing.BlockHash(block)
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash) log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
log.Infof("Syncing blocks up to %s", relayBlockHash) log.Debugf("Syncing blocks up to %s", relayBlockHash)
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash) log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment() syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
if err != nil { if err != nil {
@ -100,7 +98,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
if shouldDownloadHeadersProof { if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof") log.Infof("Starting IBD with headers proof")
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore()) err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
if err != nil { if err != nil {
return err return err
} }
@ -175,11 +173,6 @@ func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.Do
chainNegotiationRestartCounter := 0 chainNegotiationRestartCounter := 0
chainNegotiationZoomCounts := 0 chainNegotiationZoomCounts := 0
initialLocatorLen := len(locatorHashes) initialLocatorLen := len(locatorHashes)
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return nil, nil, err
}
for { for {
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
for _, syncerChainHash := range locatorHashes { for _, syncerChainHash := range locatorHashes {
@ -188,26 +181,9 @@ func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.Do
return nil, nil, err return nil, nil, err
} }
if info.Exists { if info.Exists {
if info.BlockStatus == externalapi.StatusInvalid {
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
}
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
if err != nil {
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
}
// We're only interested in syncer chain blocks that have our pruning
// point in their selected chain. Otherwise, it means one of the following:
// 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality
// (hence we can ignore it unless merged by others).
// 2) syncerChainHash is actually in the past of our pruning point so there's no
// point in syncing from it.
if err == nil && isPruningPointOnSyncerChain {
currentHighestKnownSyncerChainHash = syncerChainHash currentHighestKnownSyncerChainHash = syncerChainHash
break break
} }
}
lowestUnknownSyncerChainHash = syncerChainHash lowestUnknownSyncerChainHash = syncerChainHash
} }
// No unknown blocks, break. Note this can only happen in the first iteration // No unknown blocks, break. Note this can only happen in the first iteration
@ -285,7 +261,7 @@ func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.Do
} }
} }
log.Infof("Found highest known syncer chain block %s from peer %s", log.Debugf("Found highest known syncer chain block %s from peer %s",
highestKnownSyncerChainHash, flow.peer) highestKnownSyncerChainHash, flow.peer)
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
@ -300,14 +276,10 @@ func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
} }
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) { func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully" successString := "successfully"
if !isFinishedSuccessfully { if !isFinishedSuccessfully {
if err != nil { successString = "(interrupted)"
successString = fmt.Sprintf("(interrupted: %s)", err)
} else {
successString = fmt.Sprintf("(interrupted)")
}
} }
log.Infof("IBD with peer %s finished %s", flow.peer, successString) log.Infof("IBD with peer %s finished %s", flow.peer, successString)
} }
@ -516,7 +488,7 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash) log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil return nil
} }
err = consensus.ValidateAndInsertBlock(block, false) _, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil { if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) { if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash) return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
@ -594,7 +566,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
receivedChunkCount++ receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 { if receivedChunkCount%ibdBatchSize == 0 {
log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs", log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount) receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk() requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
@ -646,12 +618,6 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks") progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore() highestProcessedDAAScore := lowBlockHeader.DAAScore()
// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
if err != nil {
return err
}
for offset := 0; offset < len(hashes); offset += ibdBatchSize { for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) { if offset+ibdBatchSize < len(hashes) {
@ -688,7 +654,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
return err return err
} }
err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual) virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil { if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) { if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash) log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@ -696,7 +662,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
} }
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash) return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
} }
err = flow.OnNewBlock(block) err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil { if err != nil {
return err return err
} }
@ -707,15 +673,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore) progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
} }
// We need to resolve virtual only if it wasn't updated while syncing block bodies return flow.resolveVirtual(highestProcessedDAAScore)
if !updateVirtual {
err := flow.resolveVirtual(highestProcessedDAAScore)
if err != nil {
return err
}
}
return flow.OnNewBlockTemplate()
} }
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error { func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
@ -728,24 +686,38 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
} }
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error { func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) { virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int var percents int
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 { if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100 percents = 100
} else { } else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100) percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
} }
if percents < 0 {
percents = 0
} else if percents > 100 {
percents = 100
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents) log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}) }
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
if err != nil { if err != nil {
return err return err
} }
err = flow.OnVirtualChange(virtualChangeSet)
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual") log.Infof("Resolved virtual")
return nil return nil
}
}
} }
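Master's progress callback receives the DAA score bounds and, because the target is itself an estimate, clamps the result. A self-contained sketch of that computation (illustrative names, not the kaspad API):

// progressPercent mirrors the clamped computation above: DAA score is a
// monotone proxy for progress, but the target is only estimated, so the
// result is clamped to [0, 100].
func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100
	}
	percents := int(float64(current-start) / float64(target-start) * 100)
	if percents < 0 {
		return 0
	}
	if percents > 100 {
		return 100
	}
	return percents
}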

View File

@ -14,7 +14,7 @@ import (
func (flow *handleIBDFlow) ibdWithHeadersProof( func (flow *handleIBDFlow) ibdWithHeadersProof(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error { syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensusWithoutGenesis() err := flow.Domain().InitStagingConsensus()
if err != nil { if err != nil {
return err return err
} }
@ -25,7 +25,7 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(
return err return err
} }
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err) log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus() deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil { if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr return deleteStagingConsensusErr
@ -55,12 +55,7 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
if highestKnownSyncerChainHash != nil { if highestKnownSyncerChainHash != nil {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash) highestSharedBlockFound = true
if err != nil {
return false, false, err
}
highestSharedBlockFound = blockInfo.HasBody()
pruningPoint, err := flow.Domain().Consensus().PruningPoint() pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil { if err != nil {
return false, false, err return false, false, err
@ -85,33 +80,28 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
return true, true, nil return true, true, nil
} }
if highestKnownSyncerChainHash == nil {
log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
return false, false, nil return false, false, nil
} }
return false, true, nil
}
return false, true, nil return false, true, nil
} }
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) { func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent() headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil { if err != nil {
return false, err return false, err
} }
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent) headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil { if err != nil {
return false, err return false, err
} }
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() { if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil return false, nil
} }
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
} }
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) { func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
@ -290,14 +280,8 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index])) blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
} }
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) _, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
}
return err return err
}
return nil
} }
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) { func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {

View File

@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool() OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsNearlySynced() (bool, error) IsIBDRunning() bool
} }
type handleRelayedTransactionsFlow struct { type handleRelayedTransactionsFlow struct {
@ -50,12 +50,7 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err return err
} }
isNearlySynced, err := flow.IsNearlySynced() if flow.IsIBDRunning() {
if err != nil {
return err
}
// Transaction relay is disabled if the node is out of sync and thus not mining
if !isNearlySynced {
continue continue
} }
@ -102,7 +97,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool { func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
// Ask the transaction memory pool if the transaction is known // Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan). // to it in any form (main pool or orphan).
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok { if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
return true return true
} }

View File

@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() { func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
} }
func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) { func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return true, nil return false
} }
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't // TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't

View File

@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
} }
for _, transactionID := range msgRequestTransactions.IDs { for _, transactionID := range msgRequestTransactions.IDs {
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false) tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
if !ok { if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID) msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
@ -40,6 +40,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
} }
continue continue
} }
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx)) err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
if err != nil { if err != nil {
return err return err

View File

@ -2,11 +2,10 @@ package protocol
import ( import (
"fmt" "fmt"
"github.com/kaspanet/kaspad/app/protocol/common"
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
@ -91,9 +90,14 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
return <-errChan return <-errChan
} }
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler // SetOnVirtualChange sets the onVirtualChangeHandler handler
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) { func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler) m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
}
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
} }
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler // SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
@ -106,6 +110,12 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler) m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
} }
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (m *Manager) ShouldMine() (bool, error) {
return m.context.ShouldMine()
}
// IsIBDRunning returns true if IBD is currently marked as running // IsIBDRunning returns true if IBD is currently marked as running
func (m *Manager) IsIBDRunning() bool { func (m *Manager) IsIBDRunning() bool {
return m.context.IsIBDRunning() return m.context.IsIBDRunning()

View File

@ -3,7 +3,8 @@ package protocol
import ( import (
"github.com/kaspanet/kaspad/app/protocol/common" "github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flows/ready" "github.com/kaspanet/kaspad/app/protocol/flows/ready"
"github.com/kaspanet/kaspad/app/protocol/flows/v5" v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
v5 "github.com/kaspanet/kaspad/app/protocol/flows/v5"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -23,7 +24,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
// errChan is used by the flow goroutines to return to runFlows when an error occurs. // errChan is used by the flow goroutines to return to runFlows when an error occurs.
// They are both initialized here and passed to register flows. // They are both initialized here and passed to register flows.
isStopping := uint32(0) isStopping := uint32(0)
errChan := make(chan error, 1) errChan := make(chan error)
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router) receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
@ -76,6 +77,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
var flows []*common.Flow var flows []*common.Flow
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion()) log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
switch peer.ProtocolVersion() { switch peer.ProtocolVersion() {
case 4:
flows = v4.Register(m, router, errChan, &isStopping)
case 5: case 5:
flows = v5.Register(m, router, errChan, &isStopping) flows = v5.Register(m, router, errChan, &isStopping)
default: default:
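Besides dropping the v4 registration, the one change in this hunk is the buffered error channel. The buffer of 1 matters because a flow goroutine must be able to report its error and exit even after runFlows has returned; with an unbuffered channel that send would block forever. A self-contained sketch of the idiom:

package main

import "errors"

func doWork() error { return errors.New("flow failed") } // stand-in for a flow

func main() {
	// Capacity 1: the goroutine's send never blocks, so it cannot leak
	// even if the receiver has already given up.
	errChan := make(chan error, 1)
	go func() { errChan <- doWork() }()
	if err := <-errChan; err != nil {
		println(err.Error())
	}
}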

View File

@ -12,7 +12,6 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager" "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager" "github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter" "github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/pkg/errors"
) )
// Manager is an RPC manager // Manager is an RPC manager
@ -29,7 +28,6 @@ func NewManager(
connectionManager *connmanager.ConnectionManager, connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager, addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex, utxoIndex *utxoindex.UTXOIndex,
consensusEventsChan chan externalapi.ConsensusEvent,
shutDownChan chan<- struct{}) *Manager { shutDownChan chan<- struct{}) *Manager {
manager := Manager{ manager := Manager{
@ -46,90 +44,50 @@ func NewManager(
} }
netAdapter.SetRPCRouterInitializer(manager.routerInitializer) netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
manager.initConsensusEventsHandler(consensusEventsChan)
return &manager return &manager
} }
func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) { // NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
spawn("consensusEventsHandler", func() { func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
for { onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
consensusEvent, ok := <-consensusEventsChan
if !ok {
return
}
switch event := consensusEvent.(type) {
case *externalapi.VirtualChangeSet:
err := m.notifyVirtualChange(event)
if err != nil {
panic(err)
}
case *externalapi.BlockAdded:
err := m.notifyBlockAddedToDAG(event.Block)
if err != nil {
panic(err)
}
default:
panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
}
}
})
}
// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
defer onEnd() defer onEnd()
// Before converting the block and populating it, we check if any listeners are interested. err := m.NotifyVirtualChange(virtualChangeSet)
// This is done since most nodes do not use this event. if err != nil {
if !m.context.NotificationManager.HasBlockAddedListeners() { return err
return nil
} }
rpcBlock := appmessage.DomainBlockToRPCBlock(block) rpcBlock := appmessage.DomainBlockToRPCBlock(block)
err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true) err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
if err != nil { if err != nil {
return err return err
} }
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock) blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification) return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
if err != nil {
return err
}
return nil
} }
// notifyVirtualChange notifies the manager that the virtual block has been changed. // NotifyVirtualChange notifies the manager that the virtual block has been changed.
func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error { func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange") onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
defer onEnd() defer onEnd()
if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil { if m.context.Config.UTXOIndex {
err := m.notifyUTXOsChanged(virtualChangeSet) err := m.notifyUTXOsChanged(virtualChangeSet)
if err != nil { if err != nil {
return err return err
} }
} }
err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore) err := m.notifyVirtualSelectedParentBlueScoreChanged()
if err != nil { if err != nil {
return err return err
} }
err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore) err = m.notifyVirtualDaaScoreChanged()
if err != nil { if err != nil {
return err return err
} }
if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
(len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
return nil
}
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet) err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
if err != nil { if err != nil {
return err return err
@ -138,13 +96,6 @@ func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
return nil return nil
} }
// NotifyNewBlockTemplate notifies the manager that a new
// block template is available for miners
func (m *Manager) NotifyNewBlockTemplate() error {
notification := appmessage.NewNewBlockTemplateNotificationMessage()
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
}
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index // NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
// resets due to pruning point change via IBD. // resets due to pruning point change via IBD.
func (m *Manager) NotifyPruningPointUTXOSetOverride() error { func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
@ -187,7 +138,6 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
if err != nil { if err != nil {
return err return err
} }
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges) return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
} }
@ -203,18 +153,33 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride() return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
} }
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error { func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged") onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
defer onEnd() defer onEnd()
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore) virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return err
}
blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
if err != nil {
return err
}
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification) return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
} }
func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error { func (m *Manager) notifyVirtualDaaScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged") onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
defer onEnd() defer onEnd()
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore) notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification) return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
} }
@ -223,16 +188,10 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged") onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
defer onEnd() defer onEnd()
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
if hasListeners {
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs) virtualChangeSet.VirtualSelectedParentChainChanges)
if err != nil { if err != nil {
return err return err
} }
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification) return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
}
return nil
} }
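On the master side this file no longer receives a callback per event kind; NewManager is handed a single consensus events channel and dispatches on the concrete event type. The pattern, sketched with the two event types visible above and assumed handler signatures (the real handlers are the notify* methods in this file):

func handleConsensusEvents(events <-chan externalapi.ConsensusEvent,
	onVirtualChange func(*externalapi.VirtualChangeSet) error,
	onBlockAdded func(*externalapi.DomainBlock) error) error {

	for event := range events {
		switch e := event.(type) {
		case *externalapi.VirtualChangeSet:
			if err := onVirtualChange(e); err != nil {
				return err
			}
		case *externalapi.BlockAdded:
			if err := onBlockAdded(e.Block); err != nil {
				return err
			}
		default:
			return errors.Errorf("got event of unsupported type %T", event)
		}
	}
	return nil
}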

View File

@ -48,9 +48,6 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest, appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond, appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged, appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply,
appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses,
} }
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) { func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {

View File

@ -3,14 +3,12 @@ package rpccontext
import ( import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
) )
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts // ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage // VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) ( selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed)) removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
for i, removed := range selectedParentChainChanges.Removed { for i, removed := range selectedParentChainChanges.Removed {
@ -22,58 +20,5 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
addedChainBlocks[i] = added.String() addedChainBlocks[i] = added.String()
} }
var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
if includeAcceptedTransactionIDs {
var err error
acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
if err != nil {
return nil, err
}
}
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
}
func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
[]*appmessage.AcceptedTransactionIDs, error) {
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
const chunk = 1000
position := 0
for position < len(selectedParentChainChanges.Added) {
var chainBlocksChunk []*externalapi.DomainHash
if position+chunk > len(selectedParentChainChanges.Added) {
chainBlocksChunk = selectedParentChainChanges.Added[position:]
} else {
chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
}
// We use chunks in order to avoid blocking consensus for too long
chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
if err != nil {
return nil, err
}
for i, addedChainBlock := range chainBlocksChunk {
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
AcceptingBlockHash: addedChainBlock.String(),
AcceptedTransactionIDs: nil,
}
for _, blockAcceptanceData := range chainBlockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
}
}
}
}
position += chunk
}
return acceptedTransactionIDs, nil
} }
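The removed helper above is worth pausing on: it walks the added chain blocks in fixed-size chunks of 1,000 so that no single GetBlocksAcceptanceData call blocks consensus for too long. A minimal, self-contained sketch of that chunking pattern (the item type and batch callback are stand-ins, not kaspad APIs):

package main

import "fmt"

// processInChunks visits items in fixed-size batches, mirroring the
// chunked consensus queries above: each batch is handed to fn so no
// single call has to cover the entire slice at once.
func processInChunks(items []int, chunkSize int, fn func(batch []int)) {
	for position := 0; position < len(items); position += chunkSize {
		end := position + chunkSize
		if end > len(items) {
			end = len(items)
		}
		fn(items[position:end])
	}
}

func main() {
	items := make([]int, 2500)
	for i := range items {
		items[i] = i
	}
	batches := 0
	processInChunks(items, 1000, func(batch []int) { batches++ })
	fmt.Println("batches:", batches) // 3
}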

View File

@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
UTXOIndex: utxoIndex, UTXOIndex: utxoIndex,
ShutDownChan: shutDownChan, ShutDownChan: shutDownChan,
} }
context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams) context.NotificationManager = NewNotificationManager()
return context return context
} }

View File

@ -3,11 +3,6 @@ package rpccontext
import ( import (
"sync" "sync"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/utxoindex" "github.com/kaspanet/kaspad/domain/utxoindex"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@ -18,7 +13,6 @@ import (
type NotificationManager struct { type NotificationManager struct {
sync.RWMutex sync.RWMutex
listeners map[*routerpkg.Router]*NotificationListener listeners map[*routerpkg.Router]*NotificationListener
params *dagconfig.Params
} }
// UTXOsChangedNotificationAddress represents a kaspad address. // UTXOsChangedNotificationAddress represents a kaspad address.
@ -30,8 +24,6 @@ type UTXOsChangedNotificationAddress struct {
// NotificationListener represents a registered RPC notification listener // NotificationListener represents a registered RPC notification listener
type NotificationListener struct { type NotificationListener struct {
params *dagconfig.Params
propagateBlockAddedNotifications bool propagateBlockAddedNotifications bool
propagateVirtualSelectedParentChainChangedNotifications bool propagateVirtualSelectedParentChainChangedNotifications bool
propagateFinalityConflictNotifications bool propagateFinalityConflictNotifications bool
@ -40,16 +32,13 @@ type NotificationListener struct {
propagateVirtualSelectedParentBlueScoreChangedNotifications bool propagateVirtualSelectedParentBlueScoreChangedNotifications bool
propagateVirtualDaaScoreChangedNotifications bool propagateVirtualDaaScoreChangedNotifications bool
propagatePruningPointUTXOSetOverrideNotifications bool propagatePruningPointUTXOSetOverrideNotifications bool
propagateNewBlockTemplateNotifications bool
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
} }
// NewNotificationManager creates a new NotificationManager // NewNotificationManager creates a new NotificationManager
func NewNotificationManager(params *dagconfig.Params) *NotificationManager { func NewNotificationManager() *NotificationManager {
return &NotificationManager{ return &NotificationManager{
params: params,
listeners: make(map[*routerpkg.Router]*NotificationListener), listeners: make(map[*routerpkg.Router]*NotificationListener),
} }
} }
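For context on the surrounding type: the manager keeps one NotificationListener per router behind a sync.RWMutex, taking the write lock for registration and the read lock for fan-out. A stripped-down sketch of that registry shape (placeholder router/listener types, not the kaspad ones):

package main

import (
	"fmt"
	"sync"
)

type router struct{ name string }
type listener struct{ propagateBlockAdded bool }

// manager guards a router-to-listener map with an embedded RWMutex,
// like NotificationManager above: writers Lock, readers RLock.
type manager struct {
	sync.RWMutex
	listeners map[*router]*listener
}

func newManager() *manager {
	return &manager{listeners: make(map[*router]*listener)}
}

func (m *manager) addListener(r *router) {
	m.Lock()
	defer m.Unlock()
	m.listeners[r] = &listener{}
}

func (m *manager) listenerCount() int {
	m.RLock()
	defer m.RUnlock()
	return len(m.listeners)
}

func main() {
	m := newManager()
	m.addListener(&router{name: "rpc-1"})
	fmt.Println(m.listenerCount()) // 1
}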
@ -59,7 +48,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
nm.Lock() nm.Lock()
defer nm.Unlock() defer nm.Unlock()
listener := newNotificationListener(nm.params) listener := newNotificationListener()
nm.listeners[router] = listener nm.listeners[router] = listener
} }
@ -83,19 +72,6 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
return listener, nil return listener, nil
} }
// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
func (nm *NotificationManager) HasBlockAddedListeners() bool {
nm.RLock()
defer nm.RUnlock()
for _, listener := range nm.listeners {
if listener.propagateBlockAddedNotifications {
return true
}
}
return false
}
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG // NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error { func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
nm.RLock() nm.RLock()
@ -103,8 +79,10 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
for router, listener := range nm.listeners { for router, listener := range nm.listeners {
if listener.propagateBlockAddedNotifications { if listener.propagateBlockAddedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification) err := router.OutgoingRoute().Enqueue(notification)
if err != nil { if errors.Is(err, routerpkg.ErrRouteClosed) {
log.Warnf("Couldn't send notification: %s", err)
} else if err != nil {
return err return err
} }
} }
@ -113,27 +91,13 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
} }
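Both sides of this hunk handle the same failure mode: a subscriber's outgoing route may be closed (or, with MaybeEnqueue, full), and one dead client should not abort the broadcast to everyone else. A sketch of that tolerant fan-out, with a toy enqueue and sentinel error standing in for the router package:

package main

import (
	"errors"
	"fmt"
)

var errRouteClosed = errors.New("route closed")

type route struct{ closed bool }

func (r *route) enqueue(msg string) error {
	if r.closed {
		return errRouteClosed
	}
	return nil
}

// broadcast logs and skips routes that are already closed, aborting
// only on unexpected errors, mirroring NotifyBlockAdded above.
func broadcast(routes []*route, msg string) error {
	for _, r := range routes {
		err := r.enqueue(msg)
		if errors.Is(err, errRouteClosed) {
			fmt.Println("couldn't send notification:", err)
			continue
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	routes := []*route{{}, {closed: true}, {}}
	fmt.Println(broadcast(routes, "block added")) // <nil>
}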
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed // NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged( func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
nm.RLock() nm.RLock()
defer nm.RUnlock() defer nm.RUnlock()
notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
AddedChainBlockHashes: notification.AddedChainBlockHashes,
}
for router, listener := range nm.listeners { for router, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications { if listener.propagateVirtualSelectedParentChainChangedNotifications {
var err error err := router.OutgoingRoute().Enqueue(notification)
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
err = router.OutgoingRoute().MaybeEnqueue(notification)
} else {
err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
}
if err != nil { if err != nil {
return err return err
} }
@ -142,31 +106,6 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
return nil return nil
} }
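On the master side the expensive accepted-transaction-ID payload is built once, a lighter copy without it is prepared up front, and each listener receives whichever variant it subscribed to. A self-contained sketch of that build-once, choose-per-listener pattern (the types are illustrative):

package main

import "fmt"

type notification struct {
	added       []string
	acceptedIDs []string // expensive optional payload
}

type subscriber struct{ wantsAcceptedIDs bool }

// fanOut prepares the stripped variant once, then picks per subscriber,
// so the heavy payload is neither rebuilt nor sent to listeners that
// did not opt in.
func fanOut(full *notification, subs []subscriber) []*notification {
	light := &notification{added: full.added}
	sent := make([]*notification, 0, len(subs))
	for _, s := range subs {
		if s.wantsAcceptedIDs {
			sent = append(sent, full)
		} else {
			sent = append(sent, light)
		}
	}
	return sent
}

func main() {
	full := &notification{added: []string{"b1"}, acceptedIDs: []string{"tx1"}}
	out := fanOut(full, []subscriber{{true}, {false}})
	fmt.Println(len(out[0].acceptedIDs), len(out[1].acceptedIDs)) // 1 0
}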
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether any listener is
// subscribed to VirtualSelectedParentChainChanged notifications, and whether any such listener
// requested to include AcceptedTransactionIDs.
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
nm.RLock()
defer nm.RUnlock()
hasListeners = false
hasListenersThatRequireAcceptedTransactionIDs = false
for _, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications {
hasListeners = true
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
hasListenersThatRequireAcceptedTransactionIDs = true
break
}
}
}
return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG // NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error { func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
nm.RLock() nm.RLock()
@ -207,10 +146,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
for router, listener := range nm.listeners { for router, listener := range nm.listeners {
if listener.propagateUTXOsChangedNotifications { if listener.propagateUTXOsChangedNotifications {
// Filter utxoChanges and create a notification // Filter utxoChanges and create a notification
notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges) notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
if err != nil {
return err
}
// Don't send the notification if it's empty // Don't send the notification if it's empty
if len(notification.Added) == 0 && len(notification.Removed) == 0 { if len(notification.Added) == 0 && len(notification.Removed) == 0 {
@ -218,7 +154,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
} }
// Enqueue the notification // Enqueue the notification
err = router.OutgoingRoute().MaybeEnqueue(notification) err := router.OutgoingRoute().Enqueue(notification)
if err != nil { if err != nil {
return err return err
} }
@ -237,7 +173,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
for router, listener := range nm.listeners { for router, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications { if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification) err := router.OutgoingRoute().Enqueue(notification)
if err != nil { if err != nil {
return err return err
} }
@ -256,25 +192,6 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
for router, listener := range nm.listeners { for router, listener := range nm.listeners {
if listener.propagateVirtualDaaScoreChangedNotifications { if listener.propagateVirtualDaaScoreChangedNotifications {
err := router.OutgoingRoute().MaybeEnqueue(notification)
if err != nil {
return err
}
}
}
return nil
}
// NotifyNewBlockTemplate notifies the notification manager that a new
// block template is available for miners
func (nm *NotificationManager) NotifyNewBlockTemplate(
notification *appmessage.NewBlockTemplateNotificationMessage) error {
nm.RLock()
defer nm.RUnlock()
for router, listener := range nm.listeners {
if listener.propagateNewBlockTemplateNotifications {
err := router.OutgoingRoute().Enqueue(notification) err := router.OutgoingRoute().Enqueue(notification)
if err != nil { if err != nil {
return err return err
@ -301,27 +218,18 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
return nil return nil
} }
func newNotificationListener(params *dagconfig.Params) *NotificationListener { func newNotificationListener() *NotificationListener {
return &NotificationListener{ return &NotificationListener{
params: params,
propagateBlockAddedNotifications: false, propagateBlockAddedNotifications: false,
propagateVirtualSelectedParentChainChangedNotifications: false, propagateVirtualSelectedParentChainChangedNotifications: false,
propagateFinalityConflictNotifications: false, propagateFinalityConflictNotifications: false,
propagateFinalityConflictResolvedNotifications: false, propagateFinalityConflictResolvedNotifications: false,
propagateUTXOsChangedNotifications: false, propagateUTXOsChangedNotifications: false,
propagateVirtualSelectedParentBlueScoreChangedNotifications: false, propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
propagateNewBlockTemplateNotifications: false,
propagatePruningPointUTXOSetOverrideNotifications: false, propagatePruningPointUTXOSetOverrideNotifications: false,
} }
} }
// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
// includes accepted transaction IDs in its virtual-selected-parent-chain-changed notifications
func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
}
// PropagateBlockAddedNotifications instructs the listener to send block added notifications // PropagateBlockAddedNotifications instructs the listener to send block added notifications
// to the remote listener // to the remote listener
func (nl *NotificationListener) PropagateBlockAddedNotifications() { func (nl *NotificationListener) PropagateBlockAddedNotifications() {
@ -330,9 +238,8 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications // PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
// to the remote listener // to the remote listener
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) { func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
nl.propagateVirtualSelectedParentChainChangedNotifications = true nl.propagateVirtualSelectedParentChainChangedNotifications = true
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
} }
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications // PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
@ -351,11 +258,7 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
// to the remote listener for the given addresses. Subsequent calls instruct the listener to // to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses // send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored. // are ignored.
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) { func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
if !nl.propagateUTXOsChangedNotifications { if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses = nl.propagateUTXOsChangedNotificationAddresses =
@ -370,11 +273,7 @@ func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *Notificati
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs // StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which // changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored. // notifications are not currently sent are ignored.
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) { func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
if !nl.propagateUTXOsChangedNotifications { if !nl.propagateUTXOsChangedNotifications {
return return
} }
@ -385,7 +284,7 @@ func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *Noti
} }
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification( func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) { utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
// As an optimization, we iterate over the smaller set (O(n)) among the two below // As an optimization, we iterate over the smaller set (O(n)) among the two below
// and check existence over the larger set (O(1)) // and check existence over the larger set (O(1))
@ -400,64 +299,27 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
notification.Added = append(notification.Added, utxosByAddressesEntries...) notification.Added = append(notification.Added, utxosByAddressesEntries...)
} }
} }
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed { for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok { if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs) utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...) notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
} }
} }
} else if addressesSize > 0 { } else {
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses { for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok { if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs) utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...) notification.Added = append(notification.Added, utxosByAddressesEntries...)
} }
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok { if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs) utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...) notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
} }
} }
} else {
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
if err != nil {
return nil, err
} }
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs) return notification
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
if err != nil {
return nil, err
}
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPairs)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
return notification, nil
}
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
// ExtractScriptPubKeyAddress often errors when the script is of an unknown type
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
if err != nil {
return "", err
}
var addressString string
if scriptType == txscript.NonStandardTy {
addressString = ""
} else {
addressString = address.String()
}
return addressString, nil
} }
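The comment at the top of this function states the optimization plainly: loop over whichever set is smaller (O(n) in its size) and probe the larger one by key (O(1) per probe). A minimal sketch of that choice using plain string-keyed maps rather than the utxoindex types:

package main

import "fmt"

// intersectKeys returns keys present in both maps, always iterating
// over the smaller map and probing the larger, as in
// convertUTXOChangesToUTXOsChangedNotification above.
func intersectKeys(a, b map[string]struct{}) []string {
	small, large := a, b
	if len(b) < len(a) {
		small, large = b, a
	}
	var out []string
	for k := range small {
		if _, ok := large[k]; ok {
			out = append(out, k)
		}
	}
	return out
}

func main() {
	changes := map[string]struct{}{"spk1": {}, "spk2": {}, "spk3": {}}
	watched := map[string]struct{}{"spk2": {}}
	fmt.Println(intersectKeys(changes, watched)) // [spk2]
}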
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send // PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
@ -472,12 +334,6 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
nl.propagateVirtualDaaScoreChangedNotifications = true nl.propagateVirtualDaaScoreChangedNotifications = true
} }
// PropagateNewBlockTemplateNotifications instructs the listener to send
// new block template notifications to the remote listener
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
nl.propagateNewBlockTemplateNotifications = true
}
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications // PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
// to the remote listener. // to the remote listener.
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() { func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {

View File

@ -32,6 +32,22 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
return utxosByAddressesEntries return utxosByAddressesEntries
} }
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
for outpoint := range outpoints {
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
Address: address,
Outpoint: &appmessage.RPCOutpoint{
TransactionID: outpoint.TransactionID.String(),
Index: outpoint.Index,
},
})
}
return utxosByAddressesEntries
}
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings // ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
// to UTXOsChangedNotificationAddresses // to UTXOsChangedNotificationAddresses
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses( func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
@ -47,7 +63,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
if err != nil { if err != nil {
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err) return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
} }
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String()) scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
addresses[i] = &UTXOsChangedNotificationAddress{ addresses[i] = &UTXOsChangedNotificationAddress{
Address: addressString, Address: addressString,
ScriptPublicKeyString: scriptPublicKeyString, ScriptPublicKeyString: scriptPublicKeyString,

View File

@ -81,6 +81,10 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String() block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
} }
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return nil
}
// Get the block if we didn't receive it previously // Get the block if we didn't receive it previously
if domainBlock == nil { if domainBlock == nil {
domainBlock, err = ctx.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash) domainBlock, err = ctx.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash)
@ -89,10 +93,6 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
} }
} }
if len(domainBlock.Transactions) == 0 {
return nil
}
transactionIDs := make([]string, len(domainBlock.Transactions)) transactionIDs := make([]string, len(domainBlock.Transactions))
for i, transaction := range domainBlock.Transactions { for i, transaction := range domainBlock.Transactions {
transactionIDs[i] = consensushashing.TransactionID(transaction).String() transactionIDs[i] = consensushashing.TransactionID(transaction).String()
@ -122,7 +122,6 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
} }
ctx.Domain.Consensus().PopulateMass(domainTransaction) ctx.Domain.Consensus().PopulateMass(domainTransaction)
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{ transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
TransactionID: consensushashing.TransactionID(domainTransaction).String(), TransactionID: consensushashing.TransactionID(domainTransaction).String(),
Hash: consensushashing.TransactionHash(domainTransaction).String(), Hash: consensushashing.TransactionHash(domainTransaction).String(),

View File

@ -9,14 +9,6 @@ import (
// HandleAddPeer handles the respectively named RPC command // HandleAddPeer handles the respectively named RPC command
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewAddPeerResponseMessage()
response.Error =
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
return response, nil
}
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage) AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort) address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
if err != nil { if err != nil {

View File

@ -9,14 +9,6 @@ import (
// HandleBan handles the respectively named RPC command // HandleBan handles the respectively named RPC command
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewBanResponseMessage()
response.Error =
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
return response, nil
}
banRequest := request.(*appmessage.BanRequestMessage) banRequest := request.(*appmessage.BanRequestMessage)
ip := net.ParseIP(banRequest.IP) ip := net.ParseIP(banRequest.IP)
if ip == nil { if ip == nil {

View File

@ -27,27 +27,6 @@ func HandleEstimateNetworkHashesPerSecond(
} }
} }
if context.Config.SafeRPC {
const windowSizeLimit = 10000
if windowSize > windowSizeLimit {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
windowSize, windowSizeLimit)
return response, nil
}
}
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than pruning point depth %d",
windowSize, context.Config.ActiveNetParams.PruningDepth())
return response, nil
}
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize) networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
if err != nil { if err != nil {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{} response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
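Master bounds the estimation window twice before doing any work: a hard cap of 10,000 in safe RPC mode, and never beyond the pruning-point depth, since blocks past it are no longer available. A sketch of both checks (the depth value below is illustrative; kaspad derives it from the active net params):

package main

import (
	"errors"
	"fmt"
)

const safeModeWindowLimit = 10000

// validateWindowSize applies the two bounds from the handler above;
// pruningDepth stands in for params.PruningDepth().
func validateWindowSize(windowSize int, safeRPC bool, pruningDepth uint64) error {
	if safeRPC && windowSize > safeModeWindowLimit {
		return fmt.Errorf("requested window size %d is larger than max allowed in RPC safe mode (%d)", windowSize, safeModeWindowLimit)
	}
	if uint64(windowSize) > pruningDepth {
		return errors.New("window size exceeds pruning point depth")
	}
	return nil
}

func main() {
	fmt.Println(validateWindowSize(20000, true, 100000))
}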

View File

@ -22,7 +22,7 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address) balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
if err != nil { if err != nil {
rpcError := &appmessage.RPCError{} rpcError := &appmessage.RPCError{}
if !errors.As(err, &rpcError) { if !errors.As(err, rpcError) {
return nil, err return nil, err
} }
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}

View File

@ -23,7 +23,7 @@ func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router,
if err != nil { if err != nil {
rpcError := &appmessage.RPCError{} rpcError := &appmessage.RPCError{}
if !errors.As(err, &rpcError) { if !errors.As(err, rpcError) {
return nil, err return nil, err
} }
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}

View File

@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext" "github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript" "github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
@ -17,7 +16,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix) payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
if err != nil { if err != nil {
errorMessage := &appmessage.GetBlockTemplateResponseMessage{} errorMessage := &appmessage.GetBlockResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err) errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
return errorMessage, nil return errorMessage, nil
} }
@ -27,20 +26,18 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
return nil, err return nil, err
} }
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)} coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version())}
templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData) templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
if err != nil {
return nil, err
}
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil { if err != nil {
return nil, err return nil, err
} }
if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength { return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
return errorMessage, nil
}
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
} }
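The master side also rejects a template whose coinbase payload would exceed the network maximum, since the requester's ExtraData is appended to the version string. A hedged sketch of that bound check (the limit constant is a placeholder; kaspad reads MaxCoinbasePayloadLength from its net params):

package main

import "fmt"

// maxCoinbasePayloadLength is an assumed limit for this sketch only.
const maxCoinbasePayloadLength = 204

func validateCoinbasePayload(version, extraData string) error {
	payload := version + "/" + extraData
	if uint64(len(payload)) > maxCoinbasePayloadLength {
		return fmt.Errorf("coinbase payload is above max length (%d); try to shorten the extra data", maxCoinbasePayloadLength)
	}
	return nil
}

func main() {
	fmt.Println(validateCoinbasePayload("0.12.0", "my-miner"))
}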

View File

@ -37,7 +37,7 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
return nil, err return nil, err
} }
if !blockInfo.HasHeader() { if !blockInfo.Exists {
return &appmessage.GetBlocksResponseMessage{ return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash), Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
}, nil }, nil

View File

@ -23,10 +23,6 @@ type fakeDomain struct {
testapi.TestConsensus testapi.TestConsensus
} }
func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent {
panic("implement me")
}
func (d fakeDomain) DeleteStagingConsensus() error { func (d fakeDomain) DeleteStagingConsensus() error {
panic("implement me") panic("implement me")
} }
@ -35,7 +31,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
panic("implement me") panic("implement me")
} }
func (d fakeDomain) InitStagingConsensusWithoutGenesis() error { func (d fakeDomain) InitStagingConsensus() error {
panic("implement me") panic("implement me")
} }

View File

@ -1,29 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetCoinSupply handles the respectively named RPC command
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if !context.Config.UTXOIndex {
errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
return errorMessage, nil
}
circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
if err != nil {
return nil, err
}
response := appmessage.NewGetCoinSupplyResponseMessage(
constants.MaxSompi,
circulatingSompiSupply,
)
return response, nil
}
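This removed handler (present on master) follows a common shape in the package: refuse politely with an in-band RPC error when the required index is disabled, otherwise query it and wrap the result. A compact sketch of that guard-then-query shape with stand-in types:

package main

import (
	"errors"
	"fmt"
)

type config struct{ utxoIndex bool }

type coinSupplyResponse struct {
	circulatingSompi uint64
	err              error
}

// handleGetCoinSupply returns an in-band error (not a transport error)
// when the UTXO index is off, matching the handler above.
func handleGetCoinSupply(cfg config, circulating func() (uint64, error)) (coinSupplyResponse, error) {
	if !cfg.utxoIndex {
		return coinSupplyResponse{err: errors.New("method unavailable when kaspad is run without --utxoindex")}, nil
	}
	supply, err := circulating()
	if err != nil {
		return coinSupplyResponse{}, err
	}
	return coinSupplyResponse{circulatingSompi: supply}, nil
}

func main() {
	resp, _ := handleGetCoinSupply(config{}, func() (uint64, error) { return 0, nil })
	fmt.Println(resp.err)
}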

View File

@ -9,17 +9,10 @@ import (
// HandleGetInfo handles the respectively named RPC command // HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
isNearlySynced, err := context.Domain.Consensus().IsNearlySynced()
if err != nil {
return nil, err
}
response := appmessage.NewGetInfoResponseMessage( response := appmessage.NewGetInfoResponseMessage(
context.NetAdapter.ID().String(), context.NetAdapter.ID().String(),
uint64(context.Domain.MiningManager().TransactionCount(true, false)), uint64(context.Domain.MiningManager().TransactionCount()),
version.Version(), version.Version(),
context.Config.UTXOIndex,
context.ProtocolManager.Context().HasPeers() && isNearlySynced,
) )
return response, nil return response, nil

View File

@ -7,15 +7,10 @@ import (
) )
// HandleGetMempoolEntries handles the respectively named RPC command // HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage) transactions := context.Domain.MiningManager().AllTransactions()
entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
entries := make([]*appmessage.MempoolEntry, 0) for _, transaction := range transactions {
transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)
if !getMempoolEntriesRequest.FilterTransactionPool {
for _, transaction := range transactionPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil { if err != nil {
@ -24,24 +19,8 @@ func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, requ
entries = append(entries, &appmessage.MempoolEntry{ entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee, Fee: transaction.Fee,
Transaction: rpcTransaction, Transaction: rpcTransaction,
IsOrphan: false,
}) })
} }
}
if getMempoolEntriesRequest.IncludeOrphanPool {
for _, transaction := range orphanPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
})
}
}
return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
} }
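On master the handler can report both pools, tagging each entry with IsOrphan and honoring the request's FilterTransactionPool and IncludeOrphanPool flags. A sketch of assembling such a combined listing (entry and pool representations are stand-ins):

package main

import "fmt"

type mempoolEntry struct {
	id       string
	isOrphan bool
}

// collectEntries mirrors the flag handling above: the main pool is
// included unless filtered out, the orphan pool only on request.
func collectEntries(mainPool, orphanPool []string, filterMain, includeOrphans bool) []mempoolEntry {
	entries := make([]mempoolEntry, 0, len(mainPool)+len(orphanPool))
	if !filterMain {
		for _, id := range mainPool {
			entries = append(entries, mempoolEntry{id: id, isOrphan: false})
		}
	}
	if includeOrphans {
		for _, id := range orphanPool {
			entries = append(entries, mempoolEntry{id: id, isOrphan: true})
		}
	}
	return entries
}

func main() {
	fmt.Println(collectEntries([]string{"tx1"}, []string{"tx2"}, false, true))
}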

View File

@ -1,122 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
)
// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)
mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)
sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
if err != nil {
return nil, err
}
for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {
address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
if err != nil {
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
return errorMessage, nil
}
sending := make([]*appmessage.MempoolEntry, 0)
receiving := make([]*appmessage.MempoolEntry, 0)
scriptPublicKey, err := txscript.PayToAddrScript(address)
if err != nil {
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
return errorMessage, nil
}
if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {
if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
}
if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {
if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
}
if len(sending) > 0 || len(receiving) > 0 {
mempoolEntriesByAddresses = append(
mempoolEntriesByAddresses,
&appmessage.MempoolEntryByAddress{
Address: address.String(),
Sending: sending,
Receiving: receiving,
},
)
}
}
return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil
}
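The removed handler keys mempool transactions by the scriptPublicKey derived from each requested address and splits matches into sending and receiving buckets, emitting an address only when either bucket is non-empty. A reduced sketch of that grouping (string maps stand in for the mining manager's per-script indexes):

package main

import "fmt"

type entriesByAddress struct {
	address   string
	sending   []string
	receiving []string
}

// groupByAddress looks up each address's script key in the sending and
// receiving indexes and keeps only addresses with at least one match,
// like HandleGetMempoolEntriesByAddresses above.
func groupByAddress(scriptKeyByAddr map[string]string, sending, receiving map[string]string) []entriesByAddress {
	var out []entriesByAddress
	for addr, key := range scriptKeyByAddr {
		var s, r []string
		if tx, ok := sending[key]; ok {
			s = append(s, tx)
		}
		if tx, ok := receiving[key]; ok {
			r = append(r, tx)
		}
		if len(s) > 0 || len(r) > 0 {
			out = append(out, entriesByAddress{address: addr, sending: s, receiving: r})
		}
	}
	return out
}

func main() {
	out := groupByAddress(
		map[string]string{"addr1": "spk1"},
		map[string]string{"spk1": "tx-send"},
		map[string]string{},
	)
	fmt.Println(out)
}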

View File

@ -3,18 +3,12 @@ package rpchandlers
import ( import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext" "github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid" "github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
) )
// HandleGetMempoolEntry handles the respectively named RPC command // HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
transaction := &externalapi.DomainTransaction{}
var found bool
var isOrphan bool
getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage) getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)
transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID) transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)
@ -24,18 +18,17 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
return errorMessage, nil return errorMessage, nil
} }
mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool) transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
if !ok {
if !found {
errorMessage := &appmessage.GetMempoolEntryResponseMessage{} errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID) errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
return errorMessage, nil return errorMessage, nil
} }
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil) err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
} }

View File

@ -26,14 +26,12 @@ func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _
return response, nil return response, nil
} }
chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(virtualSelectedParentChain)
virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage( response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes, chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes)
chainChangedNotification.AcceptedTransactionIDs)
return response, nil return response, nil
} }

View File

@ -1,19 +0,0 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
listener, err := context.NotificationManager.Listener(router)
if err != nil {
return nil, err
}
listener.PropagateNewBlockTemplateNotifications()
response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
return response, nil
}

View File

@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
if err != nil { if err != nil {
return nil, err return nil, err
} }
context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses) listener.PropagateUTXOsChangedNotifications(addresses)
response := appmessage.NewNotifyUTXOsChangedResponseMessage() response := appmessage.NewNotifyUTXOsChangedResponseMessage()
return response, nil return response, nil

View File

@ -7,17 +7,12 @@ import (
) )
// HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command // HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command
func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
request appmessage.Message) (appmessage.Message, error) {
notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage)
listener, err := context.NotificationManager.Listener(router) listener, err := context.NotificationManager.Listener(router)
if err != nil { if err != nil {
return nil, err return nil, err
} }
listener.PropagateVirtualSelectedParentChainChangedNotifications( listener.PropagateVirtualSelectedParentChainChangedNotifications()
notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs)
response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage() response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage()
return response, nil return response, nil

View File

@ -8,14 +8,6 @@ import (
// HandleResolveFinalityConflict handles the respectively named RPC command // HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error =
appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
return response, nil
}
response := &appmessage.ResolveFinalityConflictResponseMessage{} response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented") response.Error = appmessage.RPCErrorf("not implemented")
return response, nil return response, nil
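The same safe-mode preamble appears verbatim in AddPeer, Ban, ResolveFinalityConflict, and ShutDown on master: log a warning and return an in-band RPC error instead of acting. A sketch of hoisting it into one helper (the helper is hypothetical; kaspad keeps the check inline in each handler):

package main

import (
	"errors"
	"fmt"
	"log"
)

// guardSafeRPC centralizes the repeated check: callers pass the command
// name and return the resulting in-band error to the client, if any.
func guardSafeRPC(safeRPC bool, command string) error {
	if !safeRPC {
		return nil
	}
	log.Printf("%s RPC command called while node in safe RPC mode -- ignoring.", command)
	return errors.New(command + " RPC command called while node in safe RPC mode")
}

func main() {
	if err := guardSafeRPC(true, "ShutDown"); err != nil {
		fmt.Println(err)
	}
}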

View File

@ -12,14 +12,6 @@ const pauseBeforeShutDown = time.Second
// HandleShutDown handles the respectively named RPC command // HandleShutDown handles the respectively named RPC command
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewShutDownResponseMessage()
response.Error =
appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
return response, nil
}
log.Warn("ShutDown RPC called.") log.Warn("ShutDown RPC called.")
// Wait a second before shutting down, to allow time to return the response to the caller // Wait a second before shutting down, to allow time to return the response to the caller

Some files were not shown because too many files have changed in this diff