Compare commits

master..v0.11.0-rc3

No commits in common. "master" and "v0.11.0-rc3" have entirely different histories.

577 changed files with 12493 additions and 34352 deletions

View File

@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ ubuntu-latest, windows-latest, macos-latest ]
     name: Building, ${{ matrix.os }}
     steps:
       - name: Fix CRLF on Windows
@@ -17,12 +17,18 @@ jobs:
         run: git config --global core.autocrlf false
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
+
+      # Increase the pagefile size on Windows to avoid running out of memory
+      - name: Increase pagefile size on Windows
+        if: runner.os == 'Windows'
+        run: powershell -command .github\workflows\SetPageFileSize.ps1
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.21
+          go-version: 1.16
       - name: Build on Linux
         if: runner.os == 'Linux'
@@ -30,7 +36,7 @@ jobs:
         # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
         # `-s -w` strips the binary to produce smaller size binaries
         run: |
-          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
+          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
           archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
           asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
           zip -r "${archive}" ./bin/*
@@ -41,7 +47,7 @@ jobs:
         if: runner.os == 'Windows'
         shell: bash
         run: |
-          go build -v -ldflags="-s -w" -o bin/ ./cmd/...
+          go build -v -ldflags="-s -w" -o bin/ ./...
           archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
           asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
           powershell "Compress-Archive bin/* \"${archive}\""
@@ -51,13 +57,14 @@ jobs:
       - name: Build on MacOS
         if: runner.os == 'macOS'
         run: |
-          go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
+          go build -v -ldflags="-s -w" -o ./bin/ ./...
           archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
           asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
           zip -r "${archive}" ./bin/*
           echo "archive=${archive}" >> $GITHUB_ENV
           echo "asset_name=${asset_name}" >> $GITHUB_ENV
       - name: Upload release asset
         uses: actions/upload-release-asset@v1
         env:

View File

@@ -11,18 +11,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        branch: [master, latest]
+        branch: [ master, latest ]
     name: Race detection on ${{ matrix.branch }}
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
        with:
-          go-version: 1.23
+          go-version: 1.16
      - name: Set scheduled branch name
        shell: bash
@@ -46,4 +46,4 @@ jobs:
        run: |
          git checkout "${{ env.run_on }}"
          git status
-          go test -timeout 20m -race ./...
+          go test -race ./...
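The scheduled job exists for that final flag: `go test -race` instruments memory accesses and fails the run on unsynchronized concurrent access. A minimal, self-contained illustration of the kind of bug it catches (hypothetical test, not from this repository):

```go
package racy

import (
	"sync"
	"testing"
)

// TestUnsynchronizedCounter increments a shared variable from two goroutines
// without a lock. A plain `go test` usually passes; `go test -race` reports
// the data race and fails the run.
func TestUnsynchronizedCounter(t *testing.T) {
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // unsynchronized read-modify-write on shared memory
		}()
	}
	wg.Wait()
	t.Logf("counter = %d", counter)
}
```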

View File

@@ -8,20 +8,22 @@ on:
     types: [opened, synchronize, edited]
 jobs:
   build:
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, macos-latest]
+        os: [ ubuntu-latest, macos-latest, windows-latest ]
     name: Tests, ${{ matrix.os }}
     steps:
       - name: Fix CRLF on Windows
         if: runner.os == 'Windows'
         run: git config --global core.autocrlf false
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       # Increase the pagefile size on Windows to avoid running out of memory
       - name: Increase pagefile size on Windows
@@ -29,13 +31,14 @@ jobs:
         run: powershell -command .github\workflows\SetPageFileSize.ps1
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
       - name: Go Cache
-        uses: actions/cache@v4
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -46,17 +49,19 @@ jobs:
         shell: bash
         run: ./build_and_test.sh -v
   stability-test-fast:
     runs-on: ubuntu-latest
     name: Fast stability tests, ${{ github.head_ref }}
     steps:
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
@@ -70,17 +75,18 @@ jobs:
         working-directory: stability-tests
         run: ./install_and_test.sh
   coverage:
     runs-on: ubuntu-latest
     name: Produce code coverage
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.23
+          go-version: 1.16
       - name: Delete the stability tests from coverage
         run: rm -r stability-tests

.gitignore vendored
View File

@@ -53,7 +53,6 @@ _testmain.go
 debug
 debug.test
 __debug_bin
-*__debug_*
 # CI
 version.txt

View File

@@ -1,43 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/

View File

@@ -12,7 +12,8 @@ If you want to make a big change it's better to discuss it first by opening an issue.
 ## Pull Request process
 
-Any pull request should be opened against the development branch `dev`.
+Any pull request should be opened against the development branch of the target version. The development branch format is
+as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
 
 All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
 submitting your PR.

View File

@@ -1,15 +1,16 @@
-# DEPRECATED
-
-The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
-
-PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
-
-# Kaspad
+Kaspad
+====
+Warning: This is pre-alpha software. There's no guarantee anything works.
+====
 
 [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
 [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
 
-Kaspad was the reference full node Kaspa implementation written in Go (golang).
-The code is provided for reference only.
+Kaspad is the reference full node Kaspa implementation written in Go (golang).
+
+This project is currently under active development and is in a pre-Alpha state.
+Some things still don't work and APIs are far from finalized.
 
 ## What is kaspa
@@ -17,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
 
 ## Requirements
 
-Go 1.23 or later.
+Go 1.16 or later.
 
 ## Installation
@@ -44,6 +45,7 @@ $ go install . ./cmd/...
 not already add the bin directory to your system path during Go installation,
 you are encouraged to do so now.
+
 ## Getting Started
 
 Kaspad has several configuration options available to tweak how it runs, but all
@@ -54,7 +56,7 @@ $ kaspad
 ```
 
 ## Discord
 Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
 
 ## Issue Tracker

View File

@@ -20,10 +20,7 @@ import (
 	"github.com/kaspanet/kaspad/version"
 )
 
-const (
-	leveldbCacheSizeMiB = 256
-	defaultDataDirname  = "datadir2"
-)
+const leveldbCacheSizeMiB = 256
 
 var desiredLimits = &limits.DesiredLimits{
 	FileLimitWant: 2048,
@@ -87,7 +84,6 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
 	if app.cfg.Profile != "" {
 		profiling.Start(app.cfg.Profile, log)
 	}
-	profiling.TrackHeap(app.cfg.AppDir, log)
 
 	// Return now if an interrupt signal was triggered.
 	if signal.InterruptRequested(interrupt) {
@@ -163,7 +159,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
 // dbPath returns the path to the block database given a database type.
 func databasePath(cfg *config.Config) string {
-	return filepath.Join(cfg.AppDir, defaultDataDirname)
+	return filepath.Join(cfg.AppDir, "data")
 }
 
 func removeDatabase(cfg *config.Config) error {

View File

@@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
 with the specifics of message handling such as what to do when a message is
 received. This provides the caller with a high level of flexibility.
 
-# Kaspa Message Overview
+Kaspa Message Overview
 
 The kaspa protocol consists of exchanging messages between peers. Each
 message is preceded by a header which identifies information about it such as
@@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
 appmessage using kaspa encoding are handled so the caller doesn't have to concern
 themselves with the specifics.
 
-# Message Interaction
+Message Interaction
 
 The following provides a quick summary of how the kaspa messages are intended
 to interact with one another. As stated above, these interactions are not
@@ -45,13 +45,13 @@ interactions in no particular order.
 	notfound message (MsgNotFound)
 	ping message (MsgPing)            pong message (MsgPong)
 
-# Common Parameters
+Common Parameters
 
 There are several common parameters that arise when using this package to read
 and write kaspa messages. The following sections provide a quick overview of
 these parameters so the next sections can build on them.
 
-# Protocol Version
+Protocol Version
 
 The protocol version should be negotiated with the remote peer at a higher
 level than this package via the version (MsgVersion) message exchange, however,
@@ -60,18 +60,18 @@ latest protocol version this package supports and is typically the value to use
 for all outbound connections before a potentially lower protocol version is
 negotiated.
 
-# Kaspa Network
+Kaspa Network
 
 The kaspa network is a magic number which is used to identify the start of a
 message and which kaspa network the message applies to. This package provides
 the following constants:
 
 	appmessage.Mainnet
 	appmessage.Testnet (Test network)
 	appmessage.Simnet  (Simulation test network)
 	appmessage.Devnet  (Development network)
 
-# Determining Message Type
+Determining Message Type
 
 As discussed in the kaspa message overview section, this package reads
 and writes kaspa messages using a generic interface named Message. In
@@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
 		fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
 	}
 
-# Reading Messages
+Reading Messages
 
 In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
 function. It accepts any io.Reader, but typically this will be a net.Conn to
@@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
 		// Log and handle the error
 	}
 
-# Writing Messages
+Writing Messages
 
 In order to marshall kaspa messages to the appmessage, use the WriteMessage
 function. It accepts any io.Writer, but typically this will be a net.Conn to
@@ -122,7 +122,7 @@ from a remote peer is:
 		// Log and handle the error
 	}
 
-# Errors
+Errors
 
 Errors returned by this package are either the raw errors provided by underlying
 calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
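Both sides of this file keep the same read/write flow, so a compact sketch may help. ReadMessage, WriteMessage, and the Mainnet constant are named by the docs above, but the exact signatures and the port number below are assumptions inferred from that text, not verified against the package:

```go
package main

import (
	"log"
	"net"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Hypothetical peer address; 16111 is assumed to be the mainnet P2P port.
	conn, err := net.Dial("tcp", "127.0.0.1:16111")
	if err != nil {
		log.Fatal(err) // log and handle the error
	}
	defer conn.Close()

	pver := uint32(1) // normally negotiated via the version message exchange

	// Read one message from the peer (signature assumed from the docs above).
	msg, err := appmessage.ReadMessage(conn, pver, appmessage.Mainnet)
	if err != nil {
		log.Fatal(err)
	}

	// Echo it back (signature likewise assumed from the docs above).
	err = appmessage.WriteMessage(conn, msg, pver, appmessage.Mainnet)
	if err != nil {
		log.Fatal(err)
	}
}
```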

View File

@@ -2,9 +2,6 @@ package appmessage
 
 import (
 	"encoding/hex"
-	"math/big"
-
-	"github.com/pkg/errors"
 
 	"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
@@ -32,17 +29,13 @@ func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
 func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
 	return &MsgBlockHeader{
 		Version:              domainBlockHeader.Version(),
-		Parents:              domainBlockHeader.Parents(),
+		ParentHashes:         domainBlockHeader.ParentHashes(),
 		HashMerkleRoot:       domainBlockHeader.HashMerkleRoot(),
 		AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
 		UTXOCommitment:       domainBlockHeader.UTXOCommitment(),
 		Timestamp:            mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
 		Bits:                 domainBlockHeader.Bits(),
 		Nonce:                domainBlockHeader.Nonce(),
-		BlueScore:            domainBlockHeader.BlueScore(),
-		DAAScore:             domainBlockHeader.DAAScore(),
-		BlueWork:             domainBlockHeader.BlueWork(),
-		PruningPoint:         domainBlockHeader.PruningPoint(),
 	}
 }
@@ -63,17 +56,13 @@ func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
 func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
 	return blockheader.NewImmutableBlockHeader(
 		blockHeader.Version,
-		blockHeader.Parents,
+		blockHeader.ParentHashes,
 		blockHeader.HashMerkleRoot,
 		blockHeader.AcceptedIDMerkleRoot,
 		blockHeader.UTXOCommitment,
 		blockHeader.Timestamp.UnixMilliseconds(),
 		blockHeader.Bits,
 		blockHeader.Nonce,
-		blockHeader.DAAScore,
-		blockHeader.BlueScore,
-		blockHeader.BlueWork,
-		blockHeader.PruningPoint,
 	)
 }
@@ -214,14 +203,13 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
 	}
 
 	return &externalapi.DomainTransaction{
 		Version:      rpcTransaction.Version,
 		Inputs:       inputs,
 		Outputs:      outputs,
 		LockTime:     rpcTransaction.LockTime,
 		SubnetworkID: *subnetworkID,
-		Gas:            rpcTransaction.Gas,
-		MassCommitment: rpcTransaction.Mass,
+		Gas:          rpcTransaction.LockTime,
 		Payload:      payload,
 	}, nil
 }
@@ -288,8 +276,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
 		Outputs:      outputs,
 		LockTime:     transaction.LockTime,
 		SubnetworkID: subnetworkID,
-		Gas:          transaction.Gas,
-		Mass:         transaction.MassCommitment,
+		Gas:          transaction.LockTime,
 		Payload:      payload,
 	}
 }
@@ -347,25 +334,15 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
 
 // DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
 func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
-	parents := make([]*RPCBlockLevelParents, len(block.Header.Parents()))
-	for i, blockLevelParents := range block.Header.Parents() {
-		parents[i] = &RPCBlockLevelParents{
-			ParentHashes: hashes.ToStrings(blockLevelParents),
-		}
-	}
 	header := &RPCBlockHeader{
 		Version:              uint32(block.Header.Version()),
-		Parents:              parents,
+		ParentHashes:         hashes.ToStrings(block.Header.ParentHashes()),
 		HashMerkleRoot:       block.Header.HashMerkleRoot().String(),
 		AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
 		UTXOCommitment:       block.Header.UTXOCommitment().String(),
 		Timestamp:            block.Header.TimeInMilliseconds(),
 		Bits:                 block.Header.Bits(),
 		Nonce:                block.Header.Nonce(),
-		DAAScore:             block.Header.DAAScore(),
-		BlueScore:            block.Header.BlueScore(),
-		BlueWork:             block.Header.BlueWork().Text(16),
-		PruningPoint:         block.Header.PruningPoint().String(),
 	}
 	transactions := make([]*RPCTransaction, len(block.Transactions))
 	for i, transaction := range block.Transactions {
@@ -379,16 +356,13 @@ func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
 
 // RPCBlockToDomainBlock converts `block` into a DomainBlock
 func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
-	parents := make([]externalapi.BlockLevelParents, len(block.Header.Parents))
-	for i, blockLevelParents := range block.Header.Parents {
-		parents[i] = make(externalapi.BlockLevelParents, len(blockLevelParents.ParentHashes))
-		for j, parentHash := range blockLevelParents.ParentHashes {
-			var err error
-			parents[i][j], err = externalapi.NewDomainHashFromString(parentHash)
-			if err != nil {
-				return nil, err
-			}
-		}
+	parentHashes := make([]*externalapi.DomainHash, len(block.Header.ParentHashes))
+	for i, parentHash := range block.Header.ParentHashes {
+		domainParentHashes, err := externalapi.NewDomainHashFromString(parentHash)
+		if err != nil {
+			return nil, err
+		}
+		parentHashes[i] = domainParentHashes
 	}
 	hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
 	if err != nil {
@@ -402,27 +376,15 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
 	if err != nil {
 		return nil, err
 	}
-	blueWork, success := new(big.Int).SetString(block.Header.BlueWork, 16)
-	if !success {
-		return nil, errors.Errorf("failed to parse blue work: %s", block.Header.BlueWork)
-	}
-	pruningPoint, err := externalapi.NewDomainHashFromString(block.Header.PruningPoint)
-	if err != nil {
-		return nil, err
-	}
 	header := blockheader.NewImmutableBlockHeader(
 		uint16(block.Header.Version),
-		parents,
+		parentHashes,
 		hashMerkleRoot,
 		acceptedIDMerkleRoot,
 		utxoCommitment,
 		block.Header.Timestamp,
 		block.Header.Bits,
-		block.Header.Nonce,
-		block.Header.DAAScore,
-		block.Header.BlueScore,
-		blueWork,
-		pruningPoint)
+		block.Header.Nonce)
 	transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
 	for i, transaction := range block.Transactions {
 		domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
@@ -439,10 +401,10 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
 // BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData
 func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData {
-	daaWindow := make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow))
+	daaWindow := make([]*externalapi.TrustedDataDataDAABlock, len(block.DAAWindow))
 	for i, daaBlock := range block.DAAWindow {
-		daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{
-			Header:       BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header),
+		daaWindow[i] = &externalapi.TrustedDataDataDAABlock{
+			Header:       BlockHeaderToDomainBlockHeader(daaBlock.Header),
 			GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
 		}
 	}
@@ -457,27 +419,12 @@ func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrusted
 
 	return &externalapi.BlockWithTrustedData{
 		Block:        MsgBlockToDomainBlock(block.Block),
+		DAAScore:     block.DAAScore,
 		DAAWindow:    daaWindow,
 		GHOSTDAGData: ghostdagData,
 	}
 }
 
-// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader
-func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader {
-	return &externalapi.TrustedDataDataDAAHeader{
-		Header:       BlockHeaderToDomainBlockHeader(daaBlock.Header),
-		GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
-	}
-}
-
-// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair
-func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair {
-	return &externalapi.BlockGHOSTDAGDataHashPair{
-		Hash:         datum.Hash,
-		GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
-	}
-}
-
 func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData {
 	bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes))
 	for _, pair := range data.BluesAnticoneSizes {
@@ -518,9 +465,7 @@ func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWi
 	daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow))
 	for i, daaBlock := range block.DAAWindow {
 		daaWindow[i] = &TrustedDataDataDAABlock{
-			Block: &MsgBlock{
-				Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header),
-			},
+			Header:       DomainBlockHeaderToBlockHeader(daaBlock.Header),
 			GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
 		}
 	}
@@ -535,70 +480,8 @@ func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWi
 	return &MsgBlockWithTrustedData{
 		Block:        DomainBlockToMsgBlock(block.Block),
-		DAAScore:     block.Block.Header.DAAScore(),
+		DAAScore:     block.DAAScore,
 		DAAWindow:    daaWindow,
 		GHOSTDAGData: ghostdagData,
 	}
 }
-
-// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices
-// to *MsgBlockWithTrustedDataV4
-func DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 {
-	return &MsgBlockWithTrustedDataV4{
-		Block:               DomainBlockToMsgBlock(block),
-		DAAWindowIndices:    daaWindowIndices,
-		GHOSTDAGDataIndices: ghostdagDataIndices,
-	}
-}
-
-// DomainTrustedDataToTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
-func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData {
-	daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow))
-	for i, daaBlock := range domainDAAWindow {
-		daaWindow[i] = &TrustedDataDAAHeader{
-			Header:       DomainBlockHeaderToBlockHeader(daaBlock.Header),
-			GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
-		}
-	}
-
-	ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData))
-	for i, datum := range domainGHOSTDAGData {
-		ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
-			Hash:         datum.Hash,
-			GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
-		}
-	}
-
-	return &MsgTrustedData{
-		DAAWindow:    daaWindow,
-		GHOSTDAGData: ghostdagData,
-	}
-}
-
-// MsgPruningPointProofToDomainPruningPointProof converts *MsgPruningPointProof to *externalapi.PruningPointProof
-func MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage *MsgPruningPointProof) *externalapi.PruningPointProof {
-	headers := make([][]externalapi.BlockHeader, len(pruningPointProofMessage.Headers))
-	for blockLevel, blockLevelParents := range pruningPointProofMessage.Headers {
-		headers[blockLevel] = make([]externalapi.BlockHeader, len(blockLevelParents))
-		for i, header := range blockLevelParents {
-			headers[blockLevel][i] = BlockHeaderToDomainBlockHeader(header)
-		}
-	}
-	return &externalapi.PruningPointProof{
-		Headers: headers,
-	}
-}
-
-// DomainPruningPointProofToMsgPruningPointProof converts *externalapi.PruningPointProof to *MsgPruningPointProof
-func DomainPruningPointProofToMsgPruningPointProof(pruningPointProof *externalapi.PruningPointProof) *MsgPruningPointProof {
-	headers := make([][]*MsgBlockHeader, len(pruningPointProof.Headers))
-	for blockLevel, blockLevelParents := range pruningPointProof.Headers {
-		headers[blockLevel] = make([]*MsgBlockHeader, len(blockLevelParents))
-		for i, header := range blockLevelParents {
-			headers[blockLevel][i] = DomainBlockHeaderToBlockHeader(header)
-		}
-	}
-	return &MsgPruningPointProof{
-		Headers: headers,
-	}
-}
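One mechanical detail in the removed master-side code above: cumulative blue work crosses the RPC boundary as a base-16 string, serialized with big.Int.Text(16) and parsed back with SetString(s, 16). A standalone round-trip sketch using only the standard library (illustrative value only):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	work := big.NewInt(0x1b2e3f) // stand-in for a real cumulative blue work value

	encoded := work.Text(16) // serialize: "1b2e3f"
	decoded, ok := new(big.Int).SetString(encoded, 16)
	if !ok {
		panic("failed to parse blue work: " + encoded)
	}

	fmt.Println(encoded, decoded.Cmp(work) == 0) // 1b2e3f true
}
```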

View File

@@ -38,10 +38,6 @@ type RPCError struct {
 	Message string
 }
 
-func (err RPCError) Error() string {
-	return err.Message
-}
-
 // RPCErrorf formats according to a format specifier and returns the string
 // as an RPCError.
 func RPCErrorf(format string, args ...interface{}) *RPCError {

View File

@@ -58,21 +58,13 @@ const (
 	CmdBlockHeaders
 	CmdRequestNextPruningPointUTXOSetChunk
 	CmdDonePruningPointUTXOSetChunks
+	CmdBlockBlueWork
 	CmdBlockWithTrustedData
 	CmdDoneBlocksWithTrustedData
 	CmdRequestPruningPointAndItsAnticone
+	CmdRequestBlockBlueWork
 	CmdIBDBlock
 	CmdRequestIBDBlocks
-	CmdPruningPoints
-	CmdRequestPruningPointProof
-	CmdPruningPointProof
-	CmdReady
-	CmdTrustedData
-	CmdBlockWithTrustedDataV4
-	CmdRequestNextPruningPointAndItsAnticoneBlocks
-	CmdRequestIBDChainBlockLocator
-	CmdIBDChainBlockLocator
-	CmdRequestAnticone
 
 	// rpc
 	CmdGetCurrentNetworkRequestMessage
@@ -131,8 +123,6 @@ const (
 	CmdStopNotifyingUTXOsChangedResponseMessage
 	CmdGetUTXOsByAddressesRequestMessage
 	CmdGetUTXOsByAddressesResponseMessage
-	CmdGetBalanceByAddressRequestMessage
-	CmdGetBalanceByAddressResponseMessage
 	CmdGetVirtualSelectedParentBlueScoreRequestMessage
 	CmdGetVirtualSelectedParentBlueScoreResponseMessage
 	CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
@@ -154,19 +144,6 @@ const (
 	CmdNotifyVirtualDaaScoreChangedRequestMessage
 	CmdNotifyVirtualDaaScoreChangedResponseMessage
 	CmdVirtualDaaScoreChangedNotificationMessage
-	CmdGetBalancesByAddressesRequestMessage
-	CmdGetBalancesByAddressesResponseMessage
-	CmdNotifyNewBlockTemplateRequestMessage
-	CmdNotifyNewBlockTemplateResponseMessage
-	CmdNewBlockTemplateNotificationMessage
-	CmdGetMempoolEntriesByAddressesRequestMessage
-	CmdGetMempoolEntriesByAddressesResponseMessage
-	CmdGetCoinSupplyRequestMessage
-	CmdGetCoinSupplyResponseMessage
-	CmdGetFeeEstimateRequestMessage
-	CmdGetFeeEstimateResponseMessage
-	CmdSubmitTransactionReplacementRequestMessage
-	CmdSubmitTransactionReplacementResponseMessage
 )
 
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -199,21 +176,13 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdBlockHeaders:                        "BlockHeaders",
 	CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
 	CmdDonePruningPointUTXOSetChunks:       "DonePruningPointUTXOSetChunks",
+	CmdBlockBlueWork:                       "BlockBlueWork",
 	CmdBlockWithTrustedData:                "BlockWithTrustedData",
 	CmdDoneBlocksWithTrustedData:           "DoneBlocksWithTrustedData",
 	CmdRequestPruningPointAndItsAnticone:   "RequestPruningPointAndItsAnticoneHeaders",
+	CmdRequestBlockBlueWork:                "RequestBlockBlueWork",
 	CmdIBDBlock:                            "IBDBlock",
 	CmdRequestIBDBlocks:                    "RequestIBDBlocks",
-	CmdPruningPoints:                       "PruningPoints",
-	CmdRequestPruningPointProof:            "RequestPruningPointProof",
-	CmdPruningPointProof:                   "PruningPointProof",
-	CmdReady:                               "Ready",
-	CmdTrustedData:                         "TrustedData",
-	CmdBlockWithTrustedDataV4:              "BlockWithTrustedDataV4",
-	CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
-	CmdRequestIBDChainBlockLocator:                 "RequestIBDChainBlockLocator",
-	CmdIBDChainBlockLocator:                        "IBDChainBlockLocator",
-	CmdRequestAnticone:                             "RequestAnticone",
 }
 
 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -272,8 +241,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
 	CmdGetUTXOsByAddressesRequestMessage:        "GetUTXOsByAddressesRequest",
 	CmdGetUTXOsByAddressesResponseMessage:       "GetUTXOsByAddressesResponse",
-	CmdGetBalanceByAddressRequestMessage:        "GetBalanceByAddressRequest",
-	CmdGetBalanceByAddressResponseMessage:       "GetBalancesByAddressResponse",
 	CmdGetVirtualSelectedParentBlueScoreRequestMessage:  "GetVirtualSelectedParentBlueScoreRequest",
 	CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
 	CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
@@ -295,19 +262,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdNotifyVirtualDaaScoreChangedRequestMessage:  "NotifyVirtualDaaScoreChangedRequest",
 	CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
 	CmdVirtualDaaScoreChangedNotificationMessage:   "VirtualDaaScoreChangedNotification",
-	CmdGetBalancesByAddressesRequestMessage:        "GetBalancesByAddressesRequest",
-	CmdGetBalancesByAddressesResponseMessage:       "GetBalancesByAddressesResponse",
-	CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
-	CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
-	CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
-	CmdGetMempoolEntriesByAddressesRequestMessage:  "GetMempoolEntriesByAddressesRequest",
-	CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
-	CmdGetCoinSupplyRequestMessage:                 "GetCoinSupplyRequest",
-	CmdGetCoinSupplyResponseMessage:                "GetCoinSupplyResponse",
-	CmdGetFeeEstimateRequestMessage:                "GetFeeEstimateRequest",
-	CmdGetFeeEstimateResponseMessage:               "GetFeeEstimateResponse",
-	CmdSubmitTransactionReplacementRequestMessage:  "SubmitTransactionReplacementRequest",
-	CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
 }
 
 // Message is an interface that describes a kaspa message. A type that

View File

@@ -18,21 +18,16 @@ import (
 // TestBlock tests the MsgBlock API.
 func TestBlock(t *testing.T) {
-	pver := uint32(4)
+	pver := ProtocolVersion
 
 	// Block 1 header.
-	parents := blockOne.Header.Parents
+	parentHashes := blockOne.Header.ParentHashes
 	hashMerkleRoot := blockOne.Header.HashMerkleRoot
 	acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
 	utxoCommitment := blockOne.Header.UTXOCommitment
 	bits := blockOne.Header.Bits
 	nonce := blockOne.Header.Nonce
-	daaScore := blockOne.Header.DAAScore
-	blueScore := blockOne.Header.BlueScore
-	blueWork := blockOne.Header.BlueWork
-	pruningPoint := blockOne.Header.PruningPoint
-	bh := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce,
-		daaScore, blueScore, blueWork, pruningPoint)
+	bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
 
 	// Ensure the command is expected value.
 	wantCmd := MessageCommand(5)
@@ -132,11 +127,11 @@ func TestConvertToPartial(t *testing.T) {
 	}
 }
 
-// blockOne is the first block in the mainnet block DAG.
+//blockOne is the first block in the mainnet block DAG.
 var blockOne = MsgBlock{
 	Header: MsgBlockHeader{
 		Version:              0,
-		Parents:              []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}},
+		ParentHashes:         []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
 		HashMerkleRoot:       mainnetGenesisMerkleRoot,
 		AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
 		UTXOCommitment:       exampleUTXOCommitment,

View File

@@ -0,0 +1,23 @@
+package appmessage
+
+import (
+	"math/big"
+)
+
+// MsgBlockBlueWork represents a kaspa BlockBlueWork message
+type MsgBlockBlueWork struct {
+	baseMessage
+	BlueWork *big.Int
+}
+
+// Command returns the protocol command string for the message
+func (msg *MsgBlockBlueWork) Command() MessageCommand {
+	return CmdBlockBlueWork
+}
+
+// NewBlockBlueWork returns a new kaspa BlockBlueWork message
+func NewBlockBlueWork(blueWork *big.Int) *MsgBlockBlueWork {
+	return &MsgBlockBlueWork{
+		BlueWork: blueWork,
+	}
+}
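A small usage sketch for the file added above; the constructor, field, and command constant are exactly as shown in this diff, while the test wrapper itself is hypothetical:

```go
package appmessage

import (
	"math/big"
	"testing"
)

// TestBlockBlueWorkCommand constructs the message and checks its command
// mapping against the ProtocolMessageCommandToString table shown earlier.
func TestBlockBlueWorkCommand(t *testing.T) {
	msg := NewBlockBlueWork(big.NewInt(1000000))
	if msg.Command() != CmdBlockBlueWork {
		t.Errorf("Command: got %q, want %q",
			ProtocolMessageCommandToString[msg.Command()],
			ProtocolMessageCommandToString[CmdBlockBlueWork])
	}
	if msg.BlueWork.Cmp(big.NewInt(1000000)) != 0 {
		t.Errorf("BlueWork: got %s, want 1000000", msg.BlueWork)
	}
}
```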

View File

@@ -5,12 +5,13 @@
 package appmessage
 
 import (
-	"math/big"
+	"math"
 
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/util/mstime"
+	"github.com/pkg/errors"
 )
 
 // BaseBlockHeaderPayload is the base number of bytes a block header can be,
@@ -38,8 +39,8 @@ type MsgBlockHeader struct {
 	// Version of the block. This is not the same as the protocol version.
 	Version uint16
 
-	// Parents are the parent block hashes of the block in the DAG per superblock level.
-	Parents []externalapi.BlockLevelParents
+	// Hashes of the parent block headers in the blockDAG.
+	ParentHashes []*externalapi.DomainHash
 
 	// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
 	HashMerkleRoot *externalapi.DomainHash
@@ -59,16 +60,15 @@ type MsgBlockHeader struct {
 	// Nonce used to generate the block.
 	Nonce uint64
+}
 
-	// DAAScore is the DAA score of the block.
-	DAAScore uint64
-
-	BlueScore uint64
-
-	// BlueWork is the blue work of the block.
-	BlueWork *big.Int
-
-	PruningPoint *externalapi.DomainHash
+// NumParentBlocks return the number of entries in ParentHashes
+func (h *MsgBlockHeader) NumParentBlocks() byte {
+	numParents := len(h.ParentHashes)
+	if numParents > math.MaxUint8 {
+		panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
+	}
+	return byte(numParents)
 }
 
 // BlockHash computes the block identifier hash for the given block header.
@@ -76,27 +76,27 @@ func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
 	return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
 }
 
+// IsGenesis returns true iff this block is a genesis block
+func (h *MsgBlockHeader) IsGenesis() bool {
+	return h.NumParentBlocks() == 0
+}
+
 // NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
 // block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
 // block with defaults or calculated values for the remaining fields.
-func NewBlockHeader(version uint16, parents []externalapi.BlockLevelParents, hashMerkleRoot *externalapi.DomainHash,
-	acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce,
-	daaScore, blueScore uint64, blueWork *big.Int, pruningPoint *externalapi.DomainHash) *MsgBlockHeader {
+func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
+	acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
 
 	// Limit the timestamp to one millisecond precision since the protocol
 	// doesn't support better.
 	return &MsgBlockHeader{
 		Version:              version,
-		Parents:              parents,
+		ParentHashes:         parentHashes,
 		HashMerkleRoot:       hashMerkleRoot,
 		AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
 		UTXOCommitment:       utxoCommitment,
 		Timestamp:            mstime.Now(),
 		Bits:                 bits,
 		Nonce:                nonce,
-		DAAScore:             daaScore,
-		BlueScore:            blueScore,
-		BlueWork:             blueWork,
-		PruningPoint:         pruningPoint,
 	}
 }

View File

@@ -5,34 +5,29 @@
 package appmessage
 
 import (
-	"math/big"
 	"reflect"
 	"testing"
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/util/mstime"
 )
 
 // TestBlockHeader tests the MsgBlockHeader API.
 func TestBlockHeader(t *testing.T) {
 	nonce := uint64(0xba4d87a69924a93d)
-	parents := []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}}
+	hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
 	merkleHash := mainnetGenesisMerkleRoot
 	acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
 	bits := uint32(0x1d00ffff)
-	daaScore := uint64(123)
-	blueScore := uint64(456)
-	blueWork := big.NewInt(789)
-	pruningPoint := simnetGenesisHash
-	bh := NewBlockHeader(1, parents, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce,
-		daaScore, blueScore, blueWork, pruningPoint)
+	bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
 
 	// Ensure we get the same data back out.
-	if !reflect.DeepEqual(bh.Parents, parents) {
-		t.Errorf("NewBlockHeader: wrong parents - got %v, want %v",
-			spew.Sprint(bh.Parents), spew.Sprint(parents))
+	if !reflect.DeepEqual(bh.ParentHashes, hashes) {
+		t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
+			spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
 	}
 	if bh.HashMerkleRoot != merkleHash {
 		t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
@@ -46,20 +41,44 @@ func TestBlockHeader(t *testing.T) {
 		t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
 			bh.Nonce, nonce)
 	}
-	if bh.DAAScore != daaScore {
-		t.Errorf("NewBlockHeader: wrong daaScore - got %v, want %v",
-			bh.DAAScore, daaScore)
-	}
-	if bh.BlueScore != blueScore {
-		t.Errorf("NewBlockHeader: wrong blueScore - got %v, want %v",
-			bh.BlueScore, blueScore)
-	}
-	if bh.BlueWork != blueWork {
-		t.Errorf("NewBlockHeader: wrong blueWork - got %v, want %v",
-			bh.BlueWork, blueWork)
-	}
-	if !bh.PruningPoint.Equal(pruningPoint) {
-		t.Errorf("NewBlockHeader: wrong pruningPoint - got %v, want %v",
-			bh.PruningPoint, pruningPoint)
-	}
 }
+
+func TestIsGenesis(t *testing.T) {
+	nonce := uint64(123123) // 0x1e0f3
+	bits := uint32(0x1d00ffff)
+	timestamp := mstime.UnixMilliseconds(0x495fab29000)
+
+	baseBlockHdr := &MsgBlockHeader{
+		Version:        1,
+		ParentHashes:   []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
+		HashMerkleRoot: mainnetGenesisMerkleRoot,
+		Timestamp:      timestamp,
+		Bits:           bits,
+		Nonce:          nonce,
+	}
+	genesisBlockHdr := &MsgBlockHeader{
+		Version:        1,
+		ParentHashes:   []*externalapi.DomainHash{},
+		HashMerkleRoot: mainnetGenesisMerkleRoot,
+		Timestamp:      timestamp,
+		Bits:           bits,
+		Nonce:          nonce,
+	}
+
+	tests := []struct {
+		in        *MsgBlockHeader // Block header to encode
+		isGenesis bool            // Expected result for call of .IsGenesis
+	}{
+		{genesisBlockHdr, true},
+		{baseBlockHdr, false},
+	}
+
+	t.Logf("Running %d tests", len(tests))
+	for i, test := range tests {
+		isGenesis := test.in.IsGenesis()
+		if isGenesis != test.isGenesis {
+			t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
+				i, isGenesis, test.isGenesis)
+		}
+	}
+}

View File

@@ -27,7 +27,7 @@ func NewMsgBlockWithTrustedData() *MsgBlockWithTrustedData {
 // TrustedDataDataDAABlock is an appmessage representation of externalapi.TrustedDataDataDAABlock
 type TrustedDataDataDAABlock struct {
-	Block        *MsgBlock
+	Header       *MsgBlockHeader
 	GHOSTDAGData *BlockGHOSTDAGData
 }

View File

@@ -1,20 +0,0 @@
-package appmessage
-
-// MsgBlockWithTrustedDataV4 represents a kaspa BlockWithTrustedDataV4 message
-type MsgBlockWithTrustedDataV4 struct {
-	baseMessage
-	Block               *MsgBlock
-	DAAWindowIndices    []uint64
-	GHOSTDAGDataIndices []uint64
-}
-
-// Command returns the protocol command string for the message
-func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand {
-	return CmdBlockWithTrustedDataV4
-}
-
-// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4.
-func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
-	return &MsgBlockWithTrustedDataV4{}
-}

View File

@@ -1,27 +0,0 @@
-package appmessage
-
-import (
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
-// locator message. It is used to find the blockLocator of a peer that is
-// syncing with you.
-type MsgIBDChainBlockLocator struct {
-	baseMessage
-	BlockLocatorHashes []*externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
-	return CmdIBDChainBlockLocator
-}
-
-// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
-// the Message interface. See MsgBlockLocator for details.
-func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
-	return &MsgIBDChainBlockLocator{
-		BlockLocatorHashes: locatorHashes,
-	}
-}

View File

@@ -1,20 +0,0 @@
-package appmessage
-
-// MsgPruningPointProof represents a kaspa PruningPointProof message
-type MsgPruningPointProof struct {
-	baseMessage
-	Headers [][]*MsgBlockHeader
-}
-
-// Command returns the protocol command string for the message
-func (msg *MsgPruningPointProof) Command() MessageCommand {
-	return CmdPruningPointProof
-}
-
-// NewMsgPruningPointProof returns a new MsgPruningPointProof.
-func NewMsgPruningPointProof(headers [][]*MsgBlockHeader) *MsgPruningPointProof {
-	return &MsgPruningPointProof{
-		Headers: headers,
-	}
-}

View File

@@ -1,20 +0,0 @@
-package appmessage
-
-// MsgPruningPoints represents a kaspa PruningPoints message
-type MsgPruningPoints struct {
-	baseMessage
-	Headers []*MsgBlockHeader
-}
-
-// Command returns the protocol command string for the message
-func (msg *MsgPruningPoints) Command() MessageCommand {
-	return CmdPruningPoints
-}
-
-// NewMsgPruningPoints returns a new MsgPruningPoints.
-func NewMsgPruningPoints(headers []*MsgBlockHeader) *MsgPruningPoints {
-	return &MsgPruningPoints{
-		Headers: headers,
-	}
-}

View File

@@ -1,33 +0,0 @@
-// Copyright (c) 2013-2016 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package appmessage
-
-import (
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-)
-
-// MsgRequestAnticone implements the Message interface and represents a kaspa
-// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
-type MsgRequestAnticone struct {
-	baseMessage
-	BlockHash   *externalapi.DomainHash
-	ContextHash *externalapi.DomainHash
-}
-
-// Command returns the protocol command string for the message. This is part
-// of the Message interface implementation.
-func (msg *MsgRequestAnticone) Command() MessageCommand {
-	return CmdRequestAnticone
-}
-
-// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
-// Message interface using the passed parameters and defaults for the remaining
-// fields.
-func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
-	return &MsgRequestAnticone{
-		BlockHash:   blockHash,
-		ContextHash: contextHash,
-	}
-}

View File

@@ -0,0 +1,23 @@
+package appmessage
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+)
+
+// MsgRequestBlockBlueWork represents a kaspa RequestBlockBlueWork message
+type MsgRequestBlockBlueWork struct {
+	baseMessage
+	Hash *externalapi.DomainHash
+}
+
+// Command returns the protocol command string for the message
+func (msg *MsgRequestBlockBlueWork) Command() MessageCommand {
+	return CmdRequestBlockBlueWork
+}
+
+// NewRequestBlockBlueWork returns a new kaspa RequestBlockBlueWork message
+func NewRequestBlockBlueWork(hash *externalapi.DomainHash) *MsgRequestBlockBlueWork {
+	return &MsgRequestBlockBlueWork{
+		Hash: hash,
+	}
+}

View File

@ -1,31 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
type MsgRequestIBDChainBlockLocator struct {
baseMessage
HighHash *externalapi.DomainHash
LowHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
return CmdRequestIBDChainBlockLocator
}
// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
return &MsgRequestIBDChainBlockLocator{
HighHash: highHash,
LowHash: lowHash,
}
}

View File

@ -1,22 +0,0 @@
package appmessage
// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to ask the IBD syncer peer to send
// more blocks from the pruning point anticone.
//
// This message has no payload.
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
return CmdRequestNextPruningPointAndItsAnticoneBlocks
}
// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
}

View File

@ -1,16 +0,0 @@
package appmessage
// MsgRequestPruningPointProof represents a kaspa RequestPruningPointProof message
type MsgRequestPruningPointProof struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointProof) Command() MessageCommand {
return CmdRequestPruningPointProof
}
// NewMsgRequestPruningPointProof returns a new MsgRequestPruningPointProof.
func NewMsgRequestPruningPointProof() *MsgRequestPruningPointProof {
return &MsgRequestPruningPointProof{}
}

View File

@ -1,25 +0,0 @@
package appmessage
// MsgTrustedData represents a kaspa TrustedData message
type MsgTrustedData struct {
baseMessage
DAAWindow []*TrustedDataDAAHeader
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
}
// Command returns the protocol command string for the message
func (msg *MsgTrustedData) Command() MessageCommand {
return CmdTrustedData
}
// NewMsgTrustedData returns a new MsgTrustedData.
func NewMsgTrustedData() *MsgTrustedData {
return &MsgTrustedData{}
}
// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader
type TrustedDataDAAHeader struct {
Header *MsgBlockHeader
GHOSTDAGData *BlockGHOSTDAGData
}

View File

@ -22,7 +22,7 @@ import (
// TestTx tests the MsgTx API. // TestTx tests the MsgTx API.
func TestTx(t *testing.T) { func TestTx(t *testing.T) {
pver := uint32(4) pver := ProtocolVersion
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := transactionid.FromString(txIDStr) txID, err := transactionid.FromString(txIDStr)
@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
// TestTxHash tests the ability to generate the hash of a transaction accurately. // TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) { func TestTxHashAndID(t *testing.T) {
txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7" txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d" txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
wantTxID1, err := transactionid.FromString(txID1Str) wantTxID1, err := transactionid.FromString(txID1Str)
if err != nil { if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err) t.Fatalf("NewTxIDFromStr: %v", err)
@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
spew.Sprint(tx1ID), spew.Sprint(wantTxID1)) spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
} }
hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7" hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str) wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
if err != nil { if err != nil {
t.Errorf("NewTxIDFromStr: %v", err) t.Errorf("NewTxIDFromStr: %v", err)

View File

@ -82,12 +82,12 @@ func (msg *MsgVersion) Command() MessageCommand {
// Message interface using the passed parameters and defaults for the remaining // Message interface using the passed parameters and defaults for the remaining
// fields. // fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string, func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion { subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol // Limit the timestamp to one millisecond precision since the protocol
// doesn't support better. // doesn't support better.
return &MsgVersion{ return &MsgVersion{
ProtocolVersion: protocolVersion, ProtocolVersion: ProtocolVersion,
Network: network, Network: network,
Services: 0, Services: 0,
Timestamp: mstime.Now(), Timestamp: mstime.Now(),

View File

@ -15,7 +15,7 @@ import (
// TestVersion tests the MsgVersion API. // TestVersion tests the MsgVersion API.
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {
pver := uint32(4) pver := ProtocolVersion
// Create version message data. // Create version message data.
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111} tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
@ -26,7 +26,7 @@ func TestVersion(t *testing.T) {
} }
// Ensure we get the correct data back out. // Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4) msg := NewMsgVersion(me, generatedID, "mainnet", nil)
if msg.ProtocolVersion != pver { if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v", t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver) msg.ProtocolVersion, pver)

View File

@ -5,9 +5,8 @@
package appmessage package appmessage
import ( import (
"net"
"github.com/kaspanet/kaspad/util/mstime" "github.com/kaspanet/kaspad/util/mstime"
"net"
) )
// NetAddress defines information about a peer on the network including the time // NetAddress defines information about a peer on the network including the time
@ -58,7 +57,3 @@ func NewNetAddressTimestamp(
func NewNetAddress(addr *net.TCPAddr) *NetAddress { func NewNetAddress(addr *net.TCPAddr) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port)) return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
} }
func (na NetAddress) String() string {
return na.TCPAddress().String()
}

View File

@ -1,22 +0,0 @@
package appmessage
// MsgReady implements the Message interface and represents a kaspa
// Ready message. It is used to notify that the peer is ready to receive
// messages.
//
// This message has no payload.
type MsgReady struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgReady) Command() MessageCommand {
return CmdReady
}
// NewMsgReady returns a new kaspa Ready message that conforms to the
// Message interface.
func NewMsgReady() *MsgReady {
return &MsgReady{}
}

View File

@ -11,6 +11,9 @@ import (
) )
const ( const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by // DefaultServices describes the default services that are supported by
// the server. // the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF

View File

@ -1,47 +0,0 @@
package appmessage
// GetFeeEstimateRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetFeeEstimateRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
return CmdGetFeeEstimateRequestMessage
}
// NewGetFeeEstimateRequestMessage returns an instance of the message
func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
return &GetFeeEstimateRequestMessage{}
}
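// RPCFeeRateBucket pairs a fee rate with its estimated inclusion time in seconds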
type RPCFeeRateBucket struct {
Feerate float64
EstimatedSeconds float64
}
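// RPCFeeEstimate holds fee-rate buckets grouped by priority level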
type RPCFeeEstimate struct {
PriorityBucket RPCFeeRateBucket
NormalBuckets []RPCFeeRateBucket
LowBuckets []RPCFeeRateBucket
}
// GetFeeEstimateResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetFeeEstimateResponseMessage struct {
baseMessage
Estimate RPCFeeEstimate
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
return CmdGetFeeEstimateResponseMessage
}
// NewGetFeeEstimateResponseMessage returns an instance of the message
func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
return &GetFeeEstimateResponseMessage{}
}
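A hedged client-side sketch of consuming the estimate: scan the buckets and take the cheapest fee rate whose estimated inclusion time meets a target. The field semantics are inferred from the names only; this is not canonical selection logic:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// pickFeerate returns the cheapest fee rate whose estimated inclusion time
// is within targetSeconds, falling back to the priority bucket's rate.
func pickFeerate(estimate appmessage.RPCFeeEstimate, targetSeconds float64) float64 {
	best := estimate.PriorityBucket.Feerate
	for _, bucket := range append(estimate.LowBuckets, estimate.NormalBuckets...) {
		if bucket.EstimatedSeconds <= targetSeconds && bucket.Feerate < best {
			best = bucket.Feerate
		}
	}
	return best
}

func main() {
	estimate := appmessage.RPCFeeEstimate{
		PriorityBucket: appmessage.RPCFeeRateBucket{Feerate: 3.0, EstimatedSeconds: 1},
		NormalBuckets:  []appmessage.RPCFeeRateBucket{{Feerate: 1.5, EstimatedSeconds: 10}},
		LowBuckets:     []appmessage.RPCFeeRateBucket{{Feerate: 1.0, EstimatedSeconds: 60}},
	}
	fmt.Println(pickFeerate(estimate, 15)) // prints 1.5
}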

View File

@ -1,41 +0,0 @@
package appmessage
// GetBalanceByAddressRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressRequestMessage struct {
baseMessage
Address string
}
// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand {
return CmdGetBalanceByAddressRequestMessage
}
// NewGetBalanceByAddressRequest returns an instance of the message
func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage {
return &GetBalanceByAddressRequestMessage{
Address: address,
}
}
// GetBalanceByAddressResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressResponseMessage struct {
baseMessage
Balance uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand {
return CmdGetBalanceByAddressResponseMessage
}
// NewGetBalanceByAddressResponse returns an instance of the message
func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage {
return &GetBalanceByAddressResponseMessage{
Balance: Balance,
}
}

View File

@ -1,47 +0,0 @@
package appmessage
// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesRequestMessage
}
// NewGetBalancesByAddressesRequest returns an instance of the message
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
return &GetBalancesByAddressesRequestMessage{
Addresses: addresses,
}
}
// BalancesByAddressesEntry represents the balance of some address
type BalancesByAddressesEntry struct {
Address string
Balance uint64
}
// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesResponseMessage struct {
baseMessage
Entries []*BalancesByAddressesEntry
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesResponseMessage
}
// NewGetBalancesByAddressesResponse returns an instance of the message
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
return &GetBalancesByAddressesResponseMessage{
Entries: entries,
}
}
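A brief sketch of building the request and totaling a response; the addresses and balances are made-up placeholders:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Placeholder addresses; real values would be bech32-encoded kaspa addresses.
	request := appmessage.NewGetBalancesByAddressesRequest([]string{"addr1", "addr2"})
	fmt.Println(request.Command())

	// Totaling the entries of a made-up response, in sompi.
	response := appmessage.NewGetBalancesByAddressesResponse([]*appmessage.BalancesByAddressesEntry{
		{Address: "addr1", Balance: 100},
		{Address: "addr2", Balance: 250},
	})
	var total uint64
	for _, entry := range response.Entries {
		total += entry.Balance
	}
	fmt.Println(total) // prints 350
}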

View File

@ -5,7 +5,6 @@ package appmessage
type GetBlockTemplateRequestMessage struct { type GetBlockTemplateRequestMessage struct {
baseMessage baseMessage
PayAddress string PayAddress string
ExtraData string
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -14,10 +13,9 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
} }
// NewGetBlockTemplateRequestMessage returns an instance of the message // NewGetBlockTemplateRequestMessage returns an instance of the message
func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage { func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
return &GetBlockTemplateRequestMessage{ return &GetBlockTemplateRequestMessage{
PayAddress: payAddress, PayAddress: payAddress,
ExtraData: extraData,
} }
} }

View File

@ -1,40 +0,0 @@
package appmessage
// GetCoinSupplyRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
return CmdGetCoinSupplyRequestMessage
}
// NewGetCoinSupplyRequestMessage returns an instance of the message
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
return &GetCoinSupplyRequestMessage{}
}
// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyResponseMessage struct {
baseMessage
MaxSompi uint64
CirculatingSompi uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
return CmdGetCoinSupplyResponseMessage
}
// NewGetCoinSupplyResponseMessage returns an instance of the message
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
return &GetCoinSupplyResponseMessage{
MaxSompi: maxSompi,
CirculatingSompi: circulatingSompi,
}
}
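The response carries raw sompi amounts; assuming the conventional 100,000,000 sompi per KAS (an assumption to confirm against kaspad's constants package), conversion is plain integer arithmetic:

package main

import "fmt"

// sompiPerKaspa is assumed here (the conventional 1e8 subunits per coin);
// confirm against kaspad's constants package before relying on it.
const sompiPerKaspa = 100_000_000

func main() {
	// Hypothetical figures, as they might arrive in a GetCoinSupplyResponseMessage.
	var maxSompi uint64 = 2_100_000_000 * sompiPerKaspa
	var circulatingSompi uint64 = 1_200_000_000 * sompiPerKaspa

	fmt.Printf("circulating: %d of %d KAS (%.1f%%)\n",
		circulatingSompi/sompiPerKaspa,
		maxSompi/sompiPerKaspa,
		100*float64(circulatingSompi)/float64(maxSompi))
}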

View File

@ -23,8 +23,6 @@ type GetInfoResponseMessage struct {
P2PID string P2PID string
MempoolSize uint64 MempoolSize uint64
ServerVersion string ServerVersion string
IsUtxoIndexed bool
IsSynced bool
Error *RPCError Error *RPCError
} }
@ -35,12 +33,10 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
} }
// NewGetInfoResponseMessage returns an instance of the message // NewGetInfoResponseMessage returns an instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage { func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
return &GetInfoResponseMessage{ return &GetInfoResponseMessage{
P2PID: p2pID, P2PID: p2pID,
MempoolSize: mempoolSize, MempoolSize: mempoolSize,
ServerVersion: serverVersion, ServerVersion: serverVersion,
IsUtxoIndexed: isUtxoIndexed,
IsSynced: isSynced,
} }
} }

View File

@ -4,8 +4,6 @@ package appmessage
// its respective RPC message // its respective RPC message
type GetMempoolEntriesRequestMessage struct { type GetMempoolEntriesRequestMessage struct {
baseMessage baseMessage
IncludeOrphanPool bool
FilterTransactionPool bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -14,11 +12,8 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
} }
// NewGetMempoolEntriesRequestMessage returns an instance of the message // NewGetMempoolEntriesRequestMessage returns an instance of the message
func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage { func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
return &GetMempoolEntriesRequestMessage{ return &GetMempoolEntriesRequestMessage{}
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
} }
// GetMempoolEntriesResponseMessage is an appmessage corresponding to // GetMempoolEntriesResponseMessage is an appmessage corresponding to

View File

@ -1,52 +0,0 @@
package appmessage
// MempoolEntryByAddress represents MempoolEntries associated with some address
type MempoolEntryByAddress struct {
Address string
Receiving []*MempoolEntry
Sending []*MempoolEntry
}
// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesRequestMessage struct {
baseMessage
Addresses []string
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
return CmdGetMempoolEntriesByAddressesRequestMessage
}
// NewGetMempoolEntriesByAddressesRequestMessage returns an instance of the message
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
return &GetMempoolEntriesByAddressesRequestMessage{
Addresses: addresses,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
}
// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesResponseMessage struct {
baseMessage
Entries []*MempoolEntryByAddress
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
return CmdGetMempoolEntriesByAddressesResponseMessage
}
// NewGetMempoolEntriesByAddressesResponseMessage returns an instance of the message
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
return &GetMempoolEntriesByAddressesResponseMessage{
Entries: entries,
}
}

View File

@ -4,9 +4,7 @@ package appmessage
// its respective RPC message // its respective RPC message
type GetMempoolEntryRequestMessage struct { type GetMempoolEntryRequestMessage struct {
baseMessage baseMessage
TxID string TxID string
IncludeOrphanPool bool
FilterTransactionPool bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -15,12 +13,8 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
} }
// NewGetMempoolEntryRequestMessage returns an instance of the message // NewGetMempoolEntryRequestMessage returns an instance of the message
func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage { func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
return &GetMempoolEntryRequestMessage{ return &GetMempoolEntryRequestMessage{TxID: txID}
TxID: txID,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
} }
// GetMempoolEntryResponseMessage is an appmessage corresponding to // GetMempoolEntryResponseMessage is an appmessage corresponding to
@ -36,7 +30,6 @@ type GetMempoolEntryResponseMessage struct {
type MempoolEntry struct { type MempoolEntry struct {
Fee uint64 Fee uint64
Transaction *RPCTransaction Transaction *RPCTransaction
IsOrphan bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -45,12 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
} }
// NewGetMempoolEntryResponseMessage returns an instance of the message // NewGetMempoolEntryResponseMessage returns an instance of the message
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage { func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
return &GetMempoolEntryResponseMessage{ return &GetMempoolEntryResponseMessage{
Entry: &MempoolEntry{ Entry: &MempoolEntry{
Fee: fee, Fee: fee,
Transaction: transaction, Transaction: transaction,
IsOrphan: isOrphan,
}, },
} }
} }

View File

@ -4,8 +4,7 @@ package appmessage
// its respective RPC message // its respective RPC message
type GetVirtualSelectedParentChainFromBlockRequestMessage struct { type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
baseMessage baseMessage
StartHash string StartHash string
IncludeAcceptedTransactionIDs bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -14,29 +13,18 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
} }
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns an instance of the message // NewGetVirtualSelectedParentChainFromBlockRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockRequestMessage( func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
return &GetVirtualSelectedParentChainFromBlockRequestMessage{ return &GetVirtualSelectedParentChainFromBlockRequestMessage{
StartHash: startHash, StartHash: startHash,
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
} }
} }
// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
// VirtualSelectedParentChainChangedNotificationMessage appmessages
type AcceptedTransactionIDs struct {
AcceptingBlockHash string
AcceptedTransactionIDs []string
}
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to // GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message // its respective RPC message
type GetVirtualSelectedParentChainFromBlockResponseMessage struct { type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage baseMessage
RemovedChainBlockHashes []string RemovedChainBlockHashes []string
AddedChainBlockHashes []string AddedChainBlockHashes []string
AcceptedTransactionIDs []*AcceptedTransactionIDs
Error *RPCError Error *RPCError
} }
@ -48,11 +36,10 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message // NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes, func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage { addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
return &GetVirtualSelectedParentChainFromBlockResponseMessage{ return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes, RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlockHashes, AddedChainBlockHashes: addedChainBlockHashes,
AcceptedTransactionIDs: acceptedTransactionIDs,
} }
} }

View File

@ -1,50 +0,0 @@
package appmessage
// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
return CmdNotifyNewBlockTemplateRequestMessage
}
// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
return &NotifyNewBlockTemplateRequestMessage{}
}
// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
return CmdNotifyNewBlockTemplateResponseMessage
}
// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
return &NotifyNewBlockTemplateResponseMessage{}
}
// NewBlockTemplateNotificationMessage is an appmessage corresponding to
// its respective RPC message
type NewBlockTemplateNotificationMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
return CmdNewBlockTemplateNotificationMessage
}
// NewNewBlockTemplateNotificationMessage returns an instance of the message
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
return &NewBlockTemplateNotificationMessage{}
}
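On the receiving side, these three types would typically be told apart with a type switch; a hedged sketch in which the dispatch function and its wiring are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// dispatch is a hypothetical handler for the block-template messages above.
func dispatch(message appmessage.Message) {
	switch message.(type) {
	case *appmessage.NewBlockTemplateNotificationMessage:
		fmt.Println("a new block template is available; request a fresh one")
	case *appmessage.NotifyNewBlockTemplateResponseMessage:
		fmt.Println("notification subscription acknowledged")
	default:
		fmt.Println("unhandled command:", message.Command())
	}
}

func main() {
	dispatch(appmessage.NewNewBlockTemplateNotificationMessage())
}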

View File

@ -4,7 +4,6 @@ package appmessage
// its respective RPC message // its respective RPC message
type NotifyVirtualSelectedParentChainChangedRequestMessage struct { type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
baseMessage baseMessage
IncludeAcceptedTransactionIDs bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -12,13 +11,9 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
} }
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message // NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedRequestMessage( func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage { return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
return &NotifyVirtualSelectedParentChainChangedRequestMessage{
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
}
} }
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to // NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@ -44,7 +39,6 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage baseMessage
RemovedChainBlockHashes []string RemovedChainBlockHashes []string
AddedChainBlockHashes []string AddedChainBlockHashes []string
AcceptedTransactionIDs []*AcceptedTransactionIDs
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -54,11 +48,10 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
// NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message // NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage { addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
return &VirtualSelectedParentChainChangedNotificationMessage{ return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes, RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlocks, AddedChainBlockHashes: addedChainBlocks,
AcceptedTransactionIDs: acceptedTransactionIDs,
} }
} }

View File

@ -4,8 +4,7 @@ package appmessage
// its respective RPC message // its respective RPC message
type SubmitBlockRequestMessage struct { type SubmitBlockRequestMessage struct {
baseMessage baseMessage
Block *RPCBlock Block *RPCBlock
AllowNonDAABlocks bool
} }
// Command returns the protocol command string for the message // Command returns the protocol command string for the message
@ -14,10 +13,9 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
} }
// NewSubmitBlockRequestMessage returns an instance of the message // NewSubmitBlockRequestMessage returns an instance of the message
func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage { func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
return &SubmitBlockRequestMessage{ return &SubmitBlockRequestMessage{
Block: block, Block: block,
AllowNonDAABlocks: allowNonDAABlocks,
} }
} }
@ -55,7 +53,7 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
return CmdSubmitBlockResponseMessage return CmdSubmitBlockResponseMessage
} }
// NewSubmitBlockResponseMessage returns an instance of the message // NewSubmitBlockResponseMessage returns an instance of the message
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage { func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
return &SubmitBlockResponseMessage{} return &SubmitBlockResponseMessage{}
} }
@ -72,34 +70,22 @@ type RPCBlock struct {
// used over RPC // used over RPC
type RPCBlockHeader struct { type RPCBlockHeader struct {
Version uint32 Version uint32
Parents []*RPCBlockLevelParents ParentHashes []string
HashMerkleRoot string HashMerkleRoot string
AcceptedIDMerkleRoot string AcceptedIDMerkleRoot string
UTXOCommitment string UTXOCommitment string
Timestamp int64 Timestamp int64
Bits uint32 Bits uint32
Nonce uint64 Nonce uint64
DAAScore uint64
BlueScore uint64
BlueWork string
PruningPoint string
}
// RPCBlockLevelParents holds parent hashes for one block level
type RPCBlockLevelParents struct {
ParentHashes []string
} }
// RPCBlockVerboseData holds verbose data about a block // RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct { type RPCBlockVerboseData struct {
Hash string Hash string
Difficulty float64 Difficulty float64
SelectedParentHash string SelectedParentHash string
TransactionIDs []string TransactionIDs []string
IsHeaderOnly bool IsHeaderOnly bool
BlueScore uint64 BlueScore uint64
ChildrenHashes []string ChildrenHashes []string
MergeSetBluesHashes []string
MergeSetRedsHashes []string
IsChainBlock bool
} }

View File

@ -52,7 +52,6 @@ type RPCTransaction struct {
SubnetworkID string SubnetworkID string
Gas uint64 Gas uint64
Payload string Payload string
Mass uint64
VerboseData *RPCTransactionVerboseData VerboseData *RPCTransactionVerboseData
} }

View File

@ -1,42 +0,0 @@
package appmessage
// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
// its respective RPC message
type SubmitTransactionReplacementRequestMessage struct {
baseMessage
Transaction *RPCTransaction
}
// Command returns the protocol command string for the message
func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
return CmdSubmitTransactionReplacementRequestMessage
}
// NewSubmitTransactionReplacementRequestMessage returns an instance of the message
func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
return &SubmitTransactionReplacementRequestMessage{
Transaction: transaction,
}
}
// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
// its respective RPC message
type SubmitTransactionReplacementResponseMessage struct {
baseMessage
TransactionID string
ReplacedTransaction *RPCTransaction
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
return CmdSubmitTransactionReplacementResponseMessage
}
// NewSubmitTransactionReplacementResponseMessage returns an instance of the message
func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
return &SubmitTransactionReplacementResponseMessage{
TransactionID: transactionID,
}
}

View File

@ -4,8 +4,6 @@ import (
"fmt" "fmt"
"sync/atomic" "sync/atomic"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool" "github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/app/protocol" "github.com/kaspanet/kaspad/app/protocol"
@ -69,7 +67,6 @@ func (a *ComponentManager) Stop() {
} }
a.protocolManager.Close() a.protocolManager.Close()
close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
return return
} }
@ -105,7 +102,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
var utxoIndex *utxoindex.UTXOIndex var utxoIndex *utxoindex.UTXOIndex
if cfg.UTXOIndex { if cfg.UTXOIndex {
utxoIndex, err = utxoindex.New(domain, db) utxoIndex, err = utxoindex.New(domain.Consensus(), db)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -121,7 +118,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
if err != nil { if err != nil {
return nil, err return nil, err
} }
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt) rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
return &ComponentManager{ return &ComponentManager{
cfg: cfg, cfg: cfg,
@ -142,7 +139,6 @@ func setupRPC(
connectionManager *connmanager.ConnectionManager, connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager, addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex, utxoIndex *utxoindex.UTXOIndex,
consensusEventsChan chan externalapi.ConsensusEvent,
shutDownChan chan<- struct{}, shutDownChan chan<- struct{},
) *rpc.Manager { ) *rpc.Manager {
@ -154,10 +150,9 @@ func setupRPC(
connectionManager, connectionManager,
addressManager, addressManager,
utxoIndex, utxoIndex,
consensusEventsChan,
shutDownChan, shutDownChan,
) )
protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate) protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride) protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
return rpcManager return rpcManager

View File

@ -1,8 +1,6 @@
package common package common
import ( import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"time" "time"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -10,18 +8,7 @@ import (
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing // DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes. // to/from routes.
const DefaultTimeout = 120 * time.Second const DefaultTimeout = 30 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists. // ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists") var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
type flowExecuteFunc func(peer *peerpkg.Peer)
// Flow is a data structure that is used to associate a p2p flow with some route in a router.
type Flow struct {
Name string
ExecuteFunc flowExecuteFunc
}
// FlowInitializeFunc is a function that is used in order to initialize a flow
type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error

View File

@ -11,52 +11,55 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
) )
// OnNewBlock updates the mempool after a new block arrival, and // OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcasts // relays newly unorphaned transactions and possibly rebroadcasts
// manually added transactions when not in IBD. // manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error { func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
blockInsertionResult *externalapi.BlockInsertionResult) error {
hash := consensushashing.BlockHash(block) hash := consensushashing.BlockHash(block)
log.Tracef("OnNewBlock start for block %s", hash) log.Debugf("OnNewBlock start for block %s", hash)
defer log.Tracef("OnNewBlock end for block %s", hash) defer log.Debugf("OnNewBlock end for block %s", hash)
unorphanedBlocks, err := f.UnorphanBlocks(block) unorphaningResults, err := f.UnorphanBlocks(block)
if err != nil { if err != nil {
return err return err
} }
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks)) log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
newBlocks := []*externalapi.DomainBlock{block} newBlocks := []*externalapi.DomainBlock{block}
newBlocks = append(newBlocks, unorphanedBlocks...) newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
}
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0) allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
for _, newBlock := range newBlocks { for i, newBlock := range newBlocks {
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash) log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions) acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil { if err != nil {
return err return err
} }
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...) allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
blockInsertionResult = newBlockInsertionResults[i]
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
if err != nil {
return err
}
}
} }
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions) return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
} }
// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
func (f *FlowContext) OnNewBlockTemplate() error {
// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
// state consistent without dependency on external event registration
f.Domain().MiningManager().ClearBlockTemplate()
if f.onNewBlockTemplateHandler != nil {
return f.onNewBlockTemplateHandler()
}
return nil
}
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set // OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to pruning point change via IBD. // resets due to pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error { func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
@ -97,7 +100,7 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing // SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers. // data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks { func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
return f.sharedRequestedBlocks return f.sharedRequestedBlocks
} }
@ -107,18 +110,14 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
return protocolerrors.Errorf(false, "cannot add header only block") return protocolerrors.Errorf(false, "cannot add header only block")
} }
err := f.Domain().Consensus().ValidateAndInsertBlock(block, true) blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil { if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) { if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err) log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
} }
return err return err
} }
err = f.OnNewBlockTemplate() err = f.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
err = f.OnNewBlock(block)
if err != nil { if err != nil {
return err return err
} }
@ -143,7 +142,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
return false return false
} }
f.ibdPeer = ibdPeer f.ibdPeer = ibdPeer
log.Infof("IBD started with peer %s", ibdPeer) log.Infof("IBD started")
return true return true
} }

View File

@ -2,7 +2,6 @@ package flowcontext
import ( import (
"errors" "errors"
"strings"
"sync/atomic" "sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@ -10,11 +9,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
) )
var (
// ErrPingTimeout signifies that a ping operation timed out.
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
)
// HandleError handles an error from a flow, // HandleError handles an error from a flow,
// It sends the error to errChan if isStopping == 0 and increments isStopping // It sends the error to errChan if isStopping == 0 and increments isStopping
// //
@ -27,15 +21,8 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) { if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err) panic(err)
} }
if errors.Is(err, ErrPingTimeout) {
// Avoid printing the call stack on ping timeouts, since it alarms users and this case is not interesting log.Errorf("error from %s: %s", flowName, err)
log.Errorf("error from %s: %s", flowName, err)
} else {
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
logFrame := strings.Repeat("=", 52)
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
flowName, logFrame, err, logFrame)
}
} }
if atomic.AddUint32(isStopping, 1) == 1 { if atomic.AddUint32(isStopping, 1) == 1 {

View File

@ -10,6 +10,8 @@ import (
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/config" "github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager" "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
@ -18,8 +20,9 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
) )
// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available // OnBlockAddedToDAGHandler is a handler function that's triggered
type OnNewBlockTemplateHandler func() error // when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD. // resets due to pruning point change via IBD.
@ -40,14 +43,14 @@ type FlowContext struct {
timeStarted int64 timeStarted int64
onNewBlockTemplateHandler OnNewBlockTemplateHandler onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
lastRebroadcastTime time.Time lastRebroadcastTime time.Time
sharedRequestedTransactions *SharedRequestedTransactions sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
sharedRequestedBlocks *SharedRequestedBlocks sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
ibdPeer *peerpkg.Peer ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex ibdPeerMutex sync.RWMutex
@ -75,8 +78,8 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
domain: domain, domain: domain,
addressManager: addressManager, addressManager: addressManager,
connectionManager: connectionManager, connectionManager: connectionManager,
sharedRequestedTransactions: NewSharedRequestedTransactions(), sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
sharedRequestedBlocks: NewSharedRequestedBlocks(), sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer), peers: make(map[id.ID]*peerpkg.Peer),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock), orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(), timeStarted: mstime.Now().UnixMilliseconds(),
@ -97,14 +100,9 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan return f.shutdownChan
} }
// IsNearlySynced returns whether current consensus is considered synced or close to being synced. // SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) IsNearlySynced() (bool, error) { func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
return f.Domain().Consensus().IsNearlySynced() f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
}
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
} }
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler

View File

@ -72,10 +72,3 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
} }
return peers return peers
} }
// HasPeers returns whether there are currently active peers
func (f *FlowContext) HasPeers() bool {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
return len(f.peers) > 0
}

View File

@ -15,6 +15,12 @@ import (
// on: 2^orphanResolutionRange * PHANTOM K. // on: 2^orphanResolutionRange * PHANTOM K.
const maxOrphans = 600 const maxOrphans = 600
// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
blockInsertionResult *externalapi.BlockInsertionResult
}
// AddOrphan adds the block to the orphan set // AddOrphan adds the block to the orphan set
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) { func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
f.orphansMutex.Lock() f.orphansMutex.Lock()
@ -51,7 +57,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
} }
// UnorphanBlocks removes the block from the orphan set, and removes all of the blocks that are no longer orphans. // UnorphanBlocks removes the block from the orphan set, and removes all of the blocks that are no longer orphans.
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) { func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
f.orphansMutex.Lock() f.orphansMutex.Lock()
defer f.orphansMutex.Unlock() defer f.orphansMutex.Unlock()
@ -60,17 +66,17 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
rootBlockHash := consensushashing.BlockHash(rootBlock) rootBlockHash := consensushashing.BlockHash(rootBlock)
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{}) processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
var unorphanedBlocks []*externalapi.DomainBlock var unorphaningResults []*UnorphaningResult
for len(processQueue) > 0 { for len(processQueue) > 0 {
var orphanHash externalapi.DomainHash var orphanHash externalapi.DomainHash
orphanHash, processQueue = processQueue[0], processQueue[1:] orphanHash, processQueue = processQueue[0], processQueue[1:]
orphanBlock := f.orphans[orphanHash] orphanBlock := f.orphans[orphanHash]
log.Debugf("Considering to unorphan block %s with parents %s", log.Debugf("Considering to unorphan block %s with parents %s",
orphanHash, orphanBlock.Header.DirectParents()) orphanHash, orphanBlock.Header.ParentHashes())
canBeUnorphaned := true canBeUnorphaned := true
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() { for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash) orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
if err != nil { if err != nil {
return nil, err return nil, err
@ -84,18 +90,21 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
} }
} }
if canBeUnorphaned { if canBeUnorphaned {
unorphaningSucceeded, err := f.unorphanBlock(orphanHash) blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if unorphaningSucceeded { if unorphaningSucceeded {
unorphanedBlocks = append(unorphanedBlocks, orphanBlock) unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
blockInsertionResult: blockInsertionResult,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue) processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
} }
} }
} }
return unorphanedBlocks, nil return unorphaningResults, nil
} }
// addChildOrphansToProcessQueue finds all child orphans of `blockHash` // addChildOrphansToProcessQueue finds all child orphans of `blockHash`
@ -124,7 +133,7 @@ func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.Domai
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash { func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
var childOrphans []externalapi.DomainHash var childOrphans []externalapi.DomainHash
for orphanHash, orphanBlock := range f.orphans { for orphanHash, orphanBlock := range f.orphans {
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() { for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
if orphanBlockParentHash.Equal(blockHash) { if orphanBlockParentHash.Equal(blockHash) {
childOrphans = append(childOrphans, orphanHash) childOrphans = append(childOrphans, orphanHash)
break break
@ -134,24 +143,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
return childOrphans return childOrphans
} }
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) { func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
orphanBlock, ok := f.orphans[orphanHash] orphanBlock, ok := f.orphans[orphanHash]
if !ok { if !ok {
return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash) return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
} }
delete(f.orphans, orphanHash) delete(f.orphans, orphanHash)
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true) blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
if err != nil { if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) { if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err) log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
return false, nil return nil, false, nil
} }
return false, err return nil, false, err
} }
log.Infof("Unorphaned block %s", orphanHash) log.Infof("Unorphaned block %s", orphanHash)
return true, nil return blockInsertionResult, true, nil
} }
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@ -192,7 +201,7 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa
continue continue
} }
for _, parent := range block.Header.DirectParents() { for _, parent := range block.Header.ParentHashes() {
if !addedToQueueSet.Contains(parent) { if !addedToQueueSet.Contains(parent) {
queue = append(queue, parent) queue = append(queue, parent)
addedToQueueSet.Add(parent) addedToQueueSet.Add(parent)

View File

@ -0,0 +1,43 @@
package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use a block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}
if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}
now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}
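Since ShouldMine only reads consensus state, callers can gate template serving on it. A hedged sketch of a hypothetical RPC-side guard (handleGetBlockTemplate and its error text are illustrative assumptions, not part of this diff):
func handleGetBlockTemplate(f *FlowContext) error {
	shouldMine, err := f.ShouldMine()
	if err != nil {
		return err
	}
	if !shouldMine {
		// Disconnected, mid-IBD, or the selected tip is more than one hour
		// old; any template built now would likely be stale.
		return errors.New("node is not synced; refusing to build a block template")
	}
	// ... build and return the block template here ...
	return nil
}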

View File

@ -4,6 +4,7 @@ import (
"time" "time"
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
) )
@ -29,7 +30,7 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing // SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// data about requested transactions between different peers. // data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions { func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
return f.sharedRequestedTransactions return f.sharedRequestedTransactions
} }

View File

@ -13,21 +13,15 @@ func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.Domai
} }
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) { func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
for { message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) if err != nil {
if err != nil { return nil, err
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
} }
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
} }

View File

@ -0,0 +1,42 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// BlockBlueWorkRequestsContext is the interface for the context needed for the HandleBlockBlueWorkRequests flow.
type BlockBlueWorkRequestsContext interface {
Domain() domain.Domain
}
// HandleBlockBlueWorkRequests listens to appmessage.MsgRequestBlockBlueWork messages and sends
// their corresponding blue work to the requesting peer.
func HandleBlockBlueWorkRequests(context BlockBlueWorkRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestBlockBlueWork := message.(*appmessage.MsgRequestBlockBlueWork)
log.Debugf("Got request for block %s blue work from %s", msgRequestBlockBlueWork.Hash, peer)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(msgRequestBlockBlueWork.Hash)
if err != nil {
return err
}
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "block %s not found", msgRequestBlockBlueWork.Hash)
}
err = outgoingRoute.Enqueue(appmessage.NewBlockBlueWork(blockInfo.BlueWork))
if err != nil {
return err
}
log.Debugf("Sent blue work for block %s to %s", msgRequestBlockBlueWork.Hash, peer)
}
}
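The requesting side of this round trip appears later in this diff, in checkIfHighHashHasMoreBlueWorkThanSelectedTip. Condensed into a standalone sketch (the route wiring is assumed from the surrounding flow types, and BlueWork is taken to be a *big.Int as its Cmp usage below suggests; requestBlueWork itself is illustrative):
func requestBlueWork(incomingRoute, outgoingRoute *router.Route,
	hash *externalapi.DomainHash) (*big.Int, error) {
	err := outgoingRoute.Enqueue(appmessage.NewRequestBlockBlueWork(hash))
	if err != nil {
		return nil, err
	}
	message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}
	msgBlockBlueWork, ok := message.(*appmessage.MsgBlockBlueWork)
	if !ok {
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdBlockBlueWork, message.Command())
	}
	return msgBlockBlueWork.BlueWork, nil
}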

View File

@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer" "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
) )
@ -33,7 +34,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil { if err != nil {
return err return err
} }
if !blockInfo.HasHeader() { if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+ return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash) "with an unknown targetHash %s", targetHash)
} }
@ -46,7 +47,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
} }
// The IBD block locator checks only existing blocks with bodies. // The IBD block locator checks only existing blocks with bodies.
if !blockInfo.HasBody() { if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue continue
} }

View File

@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -27,15 +28,18 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes)) log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes { for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database. // Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash) blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil { if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
} }
if !found {
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed // TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block) blockMessage := appmessage.DomainBlockToMsgBlock(block)

View File

@ -0,0 +1,46 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
}
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point and its anticone from %s", peer)
blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
return err
}
for _, block := range blocks {
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
}
}
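On the wire, a single MsgRequestPruningPointAndItsAnticone is answered by a stream of BlockWithTrustedData messages terminated by MsgDoneBlocksWithTrustedData; the real consuming loop appears later in this diff (syncPruningPointAndItsAnticone). A condensed, illustrative sketch of the receive side:
func receiveTrustedBlocks(incomingRoute *router.Route) ([]*appmessage.MsgBlockWithTrustedData, error) {
	// The first streamed block is expected to be the pruning point itself.
	var blocks []*appmessage.MsgBlockWithTrustedData
	for {
		message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, err
		}
		switch message := message.(type) {
		case *appmessage.MsgBlockWithTrustedData:
			blocks = append(blocks, message)
		case *appmessage.MsgDoneBlocksWithTrustedData:
			return blocks, nil
		default:
			return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
				"got: %s", message.Command())
		}
	}
}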

View File

@ -5,6 +5,7 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -28,15 +29,18 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes) log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes { for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database. // Fetch the block from the database.
block, found, err := context.Domain().Consensus().GetBlock(hash) blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil { if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
} }
if !found {
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed // TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block)) err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))

View File

@ -3,15 +3,12 @@ package blockrelay
import ( import (
"github.com/kaspanet/kaspad/app/appmessage" "github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common" "github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer" peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors" "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain" "github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors" "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/config" "github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -26,29 +23,24 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface { type RelayInvsContext interface {
Domain() domain.Domain Domain() domain.Domain
Config() *config.Config Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
OnNewBlockTemplate() error
OnPruningPointUTXOSetOverride() error OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks SharedRequestedBlocks() *SharedRequestedBlocks
Broadcast(message appmessage.Message) error Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock) AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool IsRecoverableError(err error) bool
IsNearlySynced() (bool, error)
}
type invRelayBlock struct {
Hash *externalapi.DomainHash
IsOrphanRoot bool
} }
type handleRelayInvsFlow struct { type handleRelayInvsFlow struct {
RelayInvsContext RelayInvsContext
incomingRoute, outgoingRoute *router.Route incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer peer *peerpkg.Peer
invsQueue []invRelayBlock invsQueue []*appmessage.MsgInvRelayBlock
} }
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they // HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
@ -61,12 +53,9 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
incomingRoute: incomingRoute, incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute, outgoingRoute: outgoingRoute,
peer: peer, peer: peer,
invsQueue: make([]invRelayBlock, 0), invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
} }
err := flow.start() return flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
close(peer.IBDRequestChannel())
return err
} }
func (flow *handleRelayInvsFlow) start() error { func (flow *handleRelayInvsFlow) start() error {
@ -92,18 +81,7 @@ func (flow *handleRelayInvsFlow) start() error {
continue continue
} }
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if flow.IsOrphan(inv.Hash) { if flow.IsOrphan(inv.Hash) {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", inv.Hash)
continue
}
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash) log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash) err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil { if err != nil {
@ -112,16 +90,10 @@ func (flow *handleRelayInvsFlow) start() error {
continue continue
} }
// Block relay is disabled if the node is already in IBD AND considered out of sync // Block relay is disabled during IBD
if flow.IsIBDRunning() { if flow.IsIBDRunning() {
isNearlySynced, err := flow.IsNearlySynced() log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
if err != nil { continue
return err
}
if !isNearlySynced {
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
continue
}
} }
log.Debugf("Requesting block %s", inv.Hash) log.Debugf("Requesting block %s", inv.Hash)
@ -139,41 +111,8 @@ func (flow *handleRelayInvsFlow) start() error {
return err return err
} }
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
continue
}
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
// that means the process was started by a proper and relevant relay block
if !inv.IsOrphanRoot {
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
if err != nil {
return err
}
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
if err != nil {
return err
}
// Since `BlueWork` respects topology, this condition means that the relay
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
continue
}
}
}
log.Debugf("Processing block %s", inv.Hash) log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo() missingParents, blockInsertionResult, err := flow.processBlock(block)
if err != nil {
return err
}
missingParents, err := flow.processBlock(block)
if err != nil { if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) { if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash) log.Infof("Ignoring pruned block %s", inv.Hash)
@ -195,48 +134,13 @@ func (flow *handleRelayInvsFlow) start() error {
continue continue
} }
oldVirtualParents := hashset.New() log.Debugf("Relaying block %s", inv.Hash)
for _, parent := range oldVirtualInfo.ParentHashes { err = flow.relayBlock(block)
oldVirtualParents.Add(parent)
}
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil { if err != nil {
return err return err
} }
virtualHasNewParents := false
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}
virtualHasNewParents = true
block, found, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}
if !found {
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)
if err != nil {
return err
}
}
if virtualHasNewParents {
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
}
log.Infof("Accepted block %s via relay", inv.Hash) log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block) err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil { if err != nil {
return err return err
} }
@ -252,35 +156,35 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
return nil return nil
} }
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) { func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 { if len(flow.invsQueue) > 0 {
var inv invRelayBlock var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:] inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil return inv, nil
} }
msg, err := flow.incomingRoute.Dequeue() msg, err := flow.incomingRoute.Dequeue()
if err != nil { if err != nil {
return invRelayBlock{}, err return nil, err
} }
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock) inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok { if !ok {
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+ return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command()) "expecting an inv message", msg.Command())
} }
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil return inv, nil
} }
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) { func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash) exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
if exists { if exists {
return nil, true, nil return nil, true, nil
} }
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is // In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean of any pending blocks. // clean of any pending blocks.
defer flow.SharedRequestedBlocks().Remove(requestHash) defer flow.SharedRequestedBlocks().remove(requestHash)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash}) getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg) err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
@ -314,7 +218,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
switch message := message.(type) { switch message := message.(type) {
case *appmessage.MsgInvRelayBlock: case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false}) flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock: case *appmessage.MsgBlock:
return message, nil return message, nil
default: default:
@ -323,25 +227,22 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
} }
} }
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) { func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
blockHash := consensushashing.BlockHash(block) blockHash := consensushashing.BlockHash(block)
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true) blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil { if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) { if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, errors.Wrapf(err, "failed to process block %s", blockHash) return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
} }
missingParentsError := &ruleerrors.ErrMissingParents{} missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) { if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil return missingParentsError.MissingParentHashes, nil, nil
} }
// A duplicate block should not appear to the user as a warning and is already reported in the calling function log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) { return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
} }
return nil, nil return nil, blockInsertionResult, nil
} }
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error { func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@ -364,19 +265,6 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
return err return err
} }
if isBlockInOrphanResolutionRange { if isBlockInOrphanResolutionRange {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", blockHash)
return nil
}
}
log.Debugf("Block %s is within orphan resolution range. "+ log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set", blockHash) "Adding it to the orphan set", blockHash)
flow.AddOrphan(block) flow.AddOrphan(block)
@ -387,28 +275,7 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
// Start IBD unless we already are in IBD // Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+ log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash) "Attempting to start IBD against it.", blockHash)
return flow.runIBDIfNotRunning(blockHash)
// Send the block to IBD flow via the IBDRequestChannel.
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
select {
case flow.peer.IBDRequestChannel() <- block:
default:
}
return nil
}
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
parents := block.Header.DirectParents()
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
} }
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be // isBlockInOrphanResolutionRange finds out whether the given blockHash should be
@ -449,16 +316,12 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
"probably happened because it was randomly evicted immediately after it was added.", orphan) "probably happened because it was randomly evicted immediately after it was added.", orphan)
} }
if len(orphanRoots) == 0 {
// In some rare cases we get here when there are no orphan roots already
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots)) log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]invRelayBlock, len(orphanRoots)) invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots { for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root) log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true} invMessages[i] = appmessage.NewMsgInvBlock(root)
} }
flow.invsQueue = append(invMessages, flow.invsQueue...) flow.invsQueue = append(invMessages, flow.invsQueue...)

View File

@ -10,9 +10,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router" "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
) )
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p const ibdBatchSize = router.DefaultMaxMessages
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow. // RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface { type RequestHeadersContext interface {
@ -44,34 +42,7 @@ func (flow *handleRequestHeadersFlow) start() error {
if err != nil { if err != nil {
return err return err
} }
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash) log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
consensus := flow.Domain().Consensus()
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
if err != nil {
return err
}
if !lowHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
}
highHashInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
}
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
if err != nil {
return err
}
if !isLowSelectedAncestorOfHigh {
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
lowHash, highHash)
}
for !lowHash.Equal(highHash) { for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer) log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
@ -80,7 +51,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// in order to avoid locking the consensus for too long // in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1 // maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10 const maxBlocks = 1 << 10
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks) blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil { if err != nil {
return err return err
} }
@ -88,7 +59,7 @@ func (flow *handleRequestHeadersFlow) start() error {
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes)) blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes { for i, blockHash := range blockHashes {
blockHeader, err := consensus.GetBlockHeader(blockHash) blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil { if err != nil {
return err return err
} }

View File

@ -70,8 +70,7 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
} }
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet) msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok { if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command()) "expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
} }
return msgRequestPruningPointUTXOSet, nil return msgRequestPruningPointUTXOSet, nil
@ -103,17 +102,14 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
return err return err
} }
finished := len(pruningPointUTXOs) < step if len(pruningPointUTXOs) < step {
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s", log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash) msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks()) return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
} }
if len(pruningPointUTXOs) > 0 { fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++ chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks // Wait for the peer to request more chunks every `ibdBatchSize` chunks
@ -124,17 +120,9 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
} }
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk) _, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok { if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages return protocolerrors.Errorf(true, "received unexpected message type. "+
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command()) "expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
} }
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
} }
} }
} }
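Worth noting: the master side of this hunk fixes two boundary conditions, indexing pruningPointUTXOs[len-1] when the final chunk is empty (an index-out-of-range panic), and sending the Done message immediately when the final chunk lands exactly on an ibdBatchSize boundary, in which case the syncee still sends one more next-chunk request that must be consumed first. A standalone toy sketch of the guarded cursor bookkeeping (illustrative types only, not part of this diff):
// paginate mirrors the fromOutpoint bookkeeping above: it walks utxos in
// fixed-size steps and advances a cursor only when a chunk is non-empty.
func paginate(utxos []int, step int) (lastCursor int, chunks int) {
	lastCursor = -1 // "no cursor yet", like a nil fromOutpoint
	for offset := 0; ; offset += step {
		end := offset + step
		if end > len(utxos) {
			end = len(utxos)
		}
		chunk := utxos[offset:end]
		if len(chunk) > 0 {
			lastCursor = chunk[len(chunk)-1] // guarded: never index an empty chunk
		}
		chunks++
		if len(chunk) < step { // the short (possibly empty) chunk is the last one
			return lastCursor, chunks
		}
	}
}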

View File

@ -0,0 +1,457 @@
package blockrelay
import (
"time"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
)
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
isFinishedSuccessfully := false
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
}()
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Debugf("Syncing blocks up to %s", highHash)
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(highHash, highestSharedBlockFound)
if err != nil {
return err
}
if !shouldSync {
return nil
}
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(highHash)
if err != nil {
return err
}
} else {
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
if err != nil {
return err
}
}
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}
log.Debugf("Finished syncing blocks up to %s", highHash)
isFinishedSuccessfully = true
return nil
}
func (flow *handleRelayInvsFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = "(interrupted)"
}
log.Infof("IBD finished %s", successString)
}
// findHighestSharedBlockHash attempts to find the highest shared block hash between the peer
// and this node. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, false, err
}
for {
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, false, err
}
if !highestHashFound {
return nil, false, nil
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, false, err
}
if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such a situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {
return highestHash, true, nil
}
locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, false, err
}
}
}
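The loop above is essentially a binary search over the headers selected chain: each round the peer reports the highest locator hash it knows, and the next locator is rebuilt over the narrowed interval between that hash and the locator hash just above it. The same narrowing idea, reduced to a standalone toy over integer heights (illustrative only; the real locator is produced by the consensus layer, and peerKnows(low) is assumed to hold initially, e.g. at a shared pruning point):
// findHighestShared narrows [low, high] until it pins the highest height
// the peer knows, analogous to the locator round trips above.
func findHighestShared(low, high int, peerKnows func(int) bool) int {
	for low < high {
		mid := (low + high + 1) / 2 // bias upward so the loop always progresses
		if peerKnows(mid) {
			low = mid // the shared block is at mid or higher
		} else {
			high = mid - 1 // the shared block is strictly below mid
		}
	}
	return low
}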
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}
return blockLocator, nil
}
func (flow *handleRelayInvsFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
return highestHashIndex, nil
}
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, false, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlockLocatorHighestHash:
highestHash := message.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
return highestHash, true, nil
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
log.Debugf("Peer %s does not know any block within our blockLocator. "+
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
return nil, false, nil
default:
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
}
func (flow *handleRelayInvsFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash) error {
log.Infof("Downloading headers from %s", flow.peer)
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
// Keep a short queue of BlockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
errChan <- err
return
}
if doneIBD {
close(blockHeadersMessageChan)
return
}
blockHeadersMessageChan <- blockHeadersMessage
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
if err != nil {
errChan <- err
return
}
}
})
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
if !ok {
// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"highHash block %s from peer %s during block download", highHash, flow.peer)
}
return nil
}
for _, block := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, block)
if err != nil {
return err
}
}
case err := <-errChan:
return err
}
}
}
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
peerSelectedTipHash *externalapi.DomainHash) error {
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleRelayInvsFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.BlockHeadersMessage:
return message, false, nil
case *appmessage.MsgDoneHeaders:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdBlockHeaders,
appmessage.CmdDoneHeaders,
message.Command())
}
}
func (flow *handleRelayInvsFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
Transactions: nil,
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
}
func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
if err != nil {
return false, err
}
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
)
}
}
}
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
}
if len(hashes) == 0 {
// Blocks can be inserted into the DAG during IBD if they were requested before IBD started.
// In rare cases, all the IBD blocks might already be inserted by the time we reach this point.
// In these cases, GetMissingBlockBodyHashes would return an empty array.
log.Debugf("No missing block body hashes found.")
return nil
}
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
hashesToRequest = hashes[offset : offset+ibdBatchSize]
} else {
hashesToRequest = hashes[offset:]
}
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
if err != nil {
return err
}
for _, expectedHash := range hashesToRequest {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return err
}
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
blockHash := consensushashing.BlockHash(block)
if !expectedHash.Equal(blockHash) {
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
continue
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
}
}
return flow.Domain().Consensus().ResolveVirtual()
}
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
// IBD. Inv messages are expected to arrive at any given moment, but should be
// ignored while we're in IBD
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
if err != nil {
return nil, err
}
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
return message, nil
}
}
}

View File

@ -0,0 +1,264 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/pkg/errors"
)
func (flow *handleRelayInvsFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
err := flow.Domain().InitStagingConsensus()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(flow.Domain().StagingConsensus(), highHash)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
}
return err
}
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
}
return nil
}
func (flow *handleRelayInvsFlow) shouldSyncAndShouldDownloadHeadersProof(highHash *externalapi.DomainHash,
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
if !highestSharedBlockFound {
hasMoreBlueWorkThanSelectedTip, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTip(highHash)
if err != nil {
return false, false, err
}
if hasMoreBlueWorkThanSelectedTip {
return true, true, nil
}
return false, false, nil
}
return false, true, nil
}
func (flow *handleRelayInvsFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTip(highHash *externalapi.DomainHash) (bool, error) {
err := flow.outgoingRoute.Enqueue(appmessage.NewRequestBlockBlueWork(highHash))
if err != nil {
return false, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
msgBlockBlueWork, ok := message.(*appmessage.MsgBlockBlueWork)
if !ok {
return false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockBlueWork, message.Command())
}
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return false, err
}
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil {
return false, err
}
return msgBlockBlueWork.BlueWork.Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
func (flow *handleRelayInvsFlow) downloadHeadersProof() error {
// TODO: Implement headers proof mechanism
return nil
}
func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(consensus externalapi.Consensus, highHash *externalapi.DomainHash) error {
err := flow.downloadHeadersProof()
if err != nil {
return err
}
pruningPoint, err := flow.syncPruningPointAndItsAnticone(consensus)
if err != nil {
return err
}
// TODO: Remove this condition once there's a better way to check finality violation
// in the headers proof.
if pruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(consensus, pruningPoint, highHash)
if err != nil {
return err
}
log.Debugf("Blocks downloaded from peer %s", flow.peer)
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(consensus, pruningPoint)
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
return nil
}
func (flow *handleRelayInvsFlow) syncPruningPointAndItsAnticone(consensus externalapi.Consensus) (*externalapi.DomainHash, error) {
log.Infof("Downloading pruning point and its anticone from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
if err != nil {
return nil, err
}
pruningPoint, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return nil, err
}
if done {
return nil, protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
}
err = flow.processBlockWithTrustedData(consensus, pruningPoint)
if err != nil {
return nil, err
}
for {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return nil, err
}
if done {
break
}
err = flow.processBlockWithTrustedData(consensus, blockWithTrustedData)
if err != nil {
return nil, err
}
}
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
return pruningPoint.Block.Header.BlockHash(), nil
}
func (flow *handleRelayInvsFlow) processBlockWithTrustedData(
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedData) error {
_, err := consensus.ValidateAndInsertBlockWithTrustedData(appmessage.BlockWithTrustedDataToDomainBlockWithTrustedData(block), false)
return err
}
func (flow *handleRelayInvsFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedData, bool, error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch downCastedMessage := message.(type) {
case *appmessage.MsgBlockWithTrustedData:
return downCastedMessage, false, nil
case *appmessage.MsgDoneBlocksWithTrustedData:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
(&appmessage.MsgBlockWithTrustedData{}).Command(),
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
downCastedMessage.Command())
}
}
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus, pruningPoint *externalapi.DomainHash) (bool, error) {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
if err != nil {
return false, err
}
if !isValid {
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
}
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
return false, err
}
if !isSuccessful {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return false, err
}
return true, nil
}

View File

@ -1,4 +1,4 @@
package flowcontext package blockrelay
import ( import (
"sync" "sync"
@ -13,15 +13,13 @@ type SharedRequestedBlocks struct {
sync.Mutex sync.Mutex
} }
// Remove removes a block from the set. func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) {
s.Lock() s.Lock()
defer s.Unlock() defer s.Unlock()
delete(s.blocks, *hash) delete(s.blocks, *hash)
} }
// RemoveSet removes a set of blocks from the set. func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) {
s.Lock() s.Lock()
defer s.Unlock() defer s.Unlock()
for hash := range blockHashes { for hash := range blockHashes {
@ -29,8 +27,7 @@ func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash
} }
} }
// AddIfNotExists adds a block to the set if it doesn't exist yet. func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) {
s.Lock() s.Lock()
defer s.Unlock() defer s.Unlock()
_, ok := s.blocks[*hash] _, ok := s.blocks[*hash]

View File

@ -28,7 +28,7 @@ type HandleHandshakeContext interface {
     HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
 }

-// HandleHandshake sets up the new_handshake protocol - It sends a version message and waits for an incoming
+// HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming
 // version message, as well as a verack for the sent version
 func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
     receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
@ -98,7 +98,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 }

 // Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan
-// Therefore we implement a separate handleError for new_handshake
+// Therefore we implement a separate handleError for handshake
 func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
     if errors.Is(err, routerpkg.ErrRouteClosed) {
         if atomic.AddUint32(isStopping, 1) == 1 {
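
The atomic isStopping counter above ensures only the first failing goroutine reports its error. A minimal sketch of that report-once pattern (names are illustrative):

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

// reportOnce forwards an error to errChan only for the first caller that
// observes a failure; later callers are dropped.
func reportOnce(err error, isStopping *uint32, errChan chan<- error) {
    if atomic.AddUint32(isStopping, 1) == 1 {
        errChan <- err
    }
}

func main() {
    errChan := make(chan error, 1)
    var isStopping uint32
    reportOnce(errors.New("route closed"), &isStopping, errChan) // forwarded
    reportOnce(errors.New("late failure"), &isStopping, errChan) // dropped
    fmt.Println(<-errChan, len(errChan)) // route closed 0
}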

View File

@ -7,7 +7,6 @@ import (
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/infrastructure/logger"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-    "github.com/pkg/errors"
 )

 var (
@ -18,9 +17,7 @@ var (
     // minAcceptableProtocolVersion is the lowest protocol version that a
     // connected peer may support.
-    minAcceptableProtocolVersion = uint32(5)
-    maxAcceptableProtocolVersion = uint32(5)
+    minAcceptableProtocolVersion = appmessage.ProtocolVersion
 )

 type receiveVersionFlow struct {
@ -100,12 +97,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
         return nil, protocolerrors.New(false, "incompatible subnetworks")
     }

-    if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion {
-        return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion)
-    }
-
-    maxProtocolVersion := flow.Config().ProtocolVersion
-    flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion)
+    flow.peer.UpdateFieldsFromMsgVersion(msgVersion)

     err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
     if err != nil {
         return nil, err

View File

@ -7,7 +7,6 @@ import (
     "github.com/kaspanet/kaspad/infrastructure/logger"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "github.com/kaspanet/kaspad/version"
-    "github.com/pkg/errors"
 )

 var (
@ -57,18 +56,15 @@ func (flow *sendVersionFlow) start() error {
     // Version message.
     localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
     subnetworkID := flow.Config().SubnetworkID

-    if flow.Config().ProtocolVersion < minAcceptableProtocolVersion {
-        return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion)
-    }
-
     msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
-        flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion)
+        flow.Config().ActiveNetParams.Name, subnetworkID)
     msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)

     // Advertise the services flag
     msg.Services = defaultServices

     // Advertise our max supported protocol version.
-    msg.ProtocolVersion = flow.Config().ProtocolVersion
+    msg.ProtocolVersion = appmessage.ProtocolVersion

     // Advertise if inv messages for transactions are desired.
     msg.DisableRelayTx = flow.Config().BlocksOnly
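
Taken together, the two version-flow diffs show master validating a configurable protocol version against compile-time bounds, where v0.11.0-rc3 always advertises a fixed constant. A hedged sketch of the master-side validation, with illustrative constants:

package main

import "fmt"

// Illustrative values; kaspad's actual bounds live in the handshake package.
const (
    minAcceptableProtocolVersion = 5
    maxAcceptableProtocolVersion = 5
)

// validateConfiguredVersion mirrors the master-side checks above: the locally
// configured version must fall inside the compile-time [min, max] window
// before it is advertised in the version message.
func validateConfiguredVersion(configured uint32) error {
    if configured < minAcceptableProtocolVersion {
        return fmt.Errorf("configured protocol version %d is obsolete", configured)
    }
    if configured > maxAcceptableProtocolVersion {
        return fmt.Errorf("%d is a nonexistent protocol version", configured)
    }
    return nil
}

func main() {
    fmt.Println(validateConfiguredVersion(5)) // <nil>
    fmt.Println(validateConfiguredVersion(6)) // 6 is a nonexistent protocol version
}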

View File

@ -2,8 +2,6 @@ package ping
 import (
     "github.com/kaspanet/kaspad/app/protocol/common"
-    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/pkg/errors"
     "time"

     "github.com/kaspanet/kaspad/app/appmessage"
@ -63,9 +61,6 @@ func (flow *sendPingsFlow) start() error {
         message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
         if err != nil {
-            if errors.Is(err, router.ErrTimeout) {
-                return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
-            }
             return err
         }
         pongMessage := message.(*appmessage.MsgPong)
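
The removed master-side branch wraps router timeouts in a dedicated sentinel so callers can single out unresponsive peers. A sketch of that sentinel-wrapping idiom (ErrPingTimeout is re-declared here for illustration, not the real kaspad symbol):

package main

import (
    "errors"
    "fmt"
)

// ErrPingTimeout mirrors the flowcontext.ErrPingTimeout sentinel referenced above.
var ErrPingTimeout = errors.New("timeout expired on ping")

// dequeuePong wraps a timeout into the sentinel, so upstream code can
// distinguish an unresponsive peer from other routing errors via errors.Is.
func dequeuePong(timedOut bool) error {
    if timedOut {
        return fmt.Errorf("%w: waiting for pong message", ErrPingTimeout)
    }
    return nil
}

func main() {
    err := dequeuePong(true)
    fmt.Println(errors.Is(err, ErrPingTimeout)) // true; the caller may disconnect the peer
}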

View File

@ -1,9 +0,0 @@
package ready
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)

View File

@ -1,56 +0,0 @@
package ready
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"sync/atomic"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleReady notifies the other peer that this peer is ready for messages, and waits for the other peer
// to send a ready message before the flows start running.
func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
peer *peerpkg.Peer,
) error {
log.Debugf("Sending ready message to %s", peer)
isStopping := uint32(0)
err := outgoingRoute.Enqueue(appmessage.NewMsgReady())
if err != nil {
return handleError(err, "HandleReady", &isStopping)
}
_, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return handleError(err, "HandleReady", &isStopping)
}
log.Debugf("Got ready message from %s", peer)
return nil
}
// Ready is different from other flows, since it should forward router.ErrRouteClosed to errChan.
// Therefore we implement a separate handleError for 'ready'
func handleError(err error, flowName string, isStopping *uint32) error {
if errors.Is(err, routerpkg.ErrRouteClosed) {
if atomic.AddUint32(isStopping, 1) == 1 {
return err
}
return nil
}
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
log.Errorf("Ready protocol error from %s: %s", flowName, err)
if atomic.AddUint32(isStopping, 1) == 1 {
return err
}
return nil
}
panic(err)
}
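
A toy model of the readiness barrier above, with buffered channels standing in for the P2P routes and a select standing in for DequeueWithTimeout:

package main

import (
    "errors"
    "fmt"
    "time"
)

// awaitReady sends our ready signal, then waits (with a timeout) for the peer's.
func awaitReady(send chan<- struct{}, recv <-chan struct{}, timeout time.Duration) error {
    send <- struct{}{}
    select {
    case <-recv:
        return nil
    case <-time.After(timeout):
        return errors.New("timeout waiting for ready message")
    }
}

func main() {
    aToB := make(chan struct{}, 1)
    bToA := make(chan struct{}, 1)
    go func() { _ = awaitReady(bToA, aToB, time.Second) }()
    fmt.Println(awaitReady(aToB, bToA, time.Second)) // <nil>; both sides may start their flows
}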

View File

@ -1,11 +1,11 @@
 package testing

 import (
-    "github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
     "testing"
     "time"

     "github.com/kaspanet/kaspad/app/appmessage"
+    "github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
     peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
     "github.com/kaspanet/kaspad/domain/consensus"
     "github.com/kaspanet/kaspad/domain/consensus/utils/testutils"

View File

@ -3,7 +3,6 @@ package transactionrelay
 import (
     "github.com/kaspanet/kaspad/app/appmessage"
     "github.com/kaspanet/kaspad/app/protocol/common"
-    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@ -19,10 +18,9 @@ import (
 type TransactionsRelayContext interface {
     NetAdapter() *netadapter.NetAdapter
     Domain() domain.Domain
-    SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
+    SharedRequestedTransactions() *SharedRequestedTransactions
     OnTransactionAddedToMempool()
     EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
-    IsNearlySynced() (bool, error)
 }

 type handleRelayedTransactionsFlow struct {
@ -50,15 +48,6 @@ func (flow *handleRelayedTransactionsFlow) start() error {
             return err
         }

-        isNearlySynced, err := flow.IsNearlySynced()
-        if err != nil {
-            return err
-        }
-        // Transaction relay is disabled if the node is out of sync and thus not mining
-        if !isNearlySynced {
-            continue
-        }
-
         requestedIDs, err := flow.requestInvTransactions(inv)
         if err != nil {
             return err
@ -79,7 +68,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
         if flow.isKnownTransaction(txID) {
             continue
         }
-        exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
+        exists := flow.SharedRequestedTransactions().addIfNotExists(txID)
         if exists {
             continue
         }
@ -93,7 +82,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
     msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
     err = flow.outgoingRoute.Enqueue(msgGetTransactions)
     if err != nil {
-        flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
+        flow.SharedRequestedTransactions().removeMany(idsToRequest)
         return nil, err
     }
     return idsToRequest, nil
@ -102,7 +91,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
 func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
     // Ask the transaction memory pool if the transaction is known
     // to it in any form (main pool or orphan).
-    if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
+    if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
         return true
     }
@ -162,7 +151,7 @@ func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
 func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
     // In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
     // clean from any pending transactions.
-    defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
+    defer flow.SharedRequestedTransactions().removeMany(requestedTransactions)
     for _, expectedID := range requestedTransactions {
         msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
         if err != nil {
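
The master column's three-argument GetTransaction lets callers choose which mempool sections to search and learn whether the hit was an orphan. A hedged sketch of that API shape, with illustrative types:

package main

import "fmt"

// mempool is an illustrative stand-in for kaspad's mining manager.
type mempool struct {
    pool    map[string]string
    orphans map[string]string
}

// getTransaction mirrors the master-side signature above: the two booleans
// select which pools to search, and the middle return reports an orphan hit.
func (m *mempool) getTransaction(id string, includePool, includeOrphans bool) (tx string, isOrphan bool, ok bool) {
    if includePool {
        if tx, ok := m.pool[id]; ok {
            return tx, false, true
        }
    }
    if includeOrphans {
        if tx, ok := m.orphans[id]; ok {
            return tx, true, true
        }
    }
    return "", false, false
}

func main() {
    m := &mempool{pool: map[string]string{"a": "txA"}, orphans: map[string]string{"b": "txB"}}
    _, isOrphan, ok := m.getTransaction("b", true, true)
    fmt.Println(isOrphan, ok) // true true (found in the orphan pool)
}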

View File

@ -2,11 +2,10 @@ package transactionrelay_test
 import (
     "errors"
-    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
     "strings"
     "testing"

+    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
     "github.com/kaspanet/kaspad/domain/consensus"
@ -25,7 +24,7 @@ import (
 type mocTransactionsRelayContext struct {
     netAdapter                  *netadapter.NetAdapter
     domain                      domain.Domain
-    sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
+    sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
 }

 func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
@ -36,7 +35,7 @@ func (m *mocTransactionsRelayContext) Domain() domain.Domain {
     return m.domain
 }

-func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
+func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
     return m.sharedRequestedTransactions
 }
@ -47,10 +46,6 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
 func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
 }

-func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
-    return true, nil
-}
-
 // TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
 // have the requested transactions in the mempool.
 func TestHandleRelayedTransactionsNotFound(t *testing.T) {
@ -65,7 +60,7 @@ func TestHandleRelayedTransactionsNotFound(t *testing.T) {
     }
     defer teardown(false)

-    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
+    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
     adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
     if err != nil {
         t.Fatalf("Failed to create a NetAdapter: %v", err)
@ -158,7 +153,7 @@ func TestOnClosedIncomingRoute(t *testing.T) {
     }
     defer teardown(false)

-    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
+    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
     adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
     if err != nil {
         t.Fatalf("Failed to creat a NetAdapter : %v", err)

View File

@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
         }

         for _, transactionID := range msgRequestTransactions.IDs {
-            tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
+            tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
             if !ok {
                 msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
@ -40,6 +40,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
                 }
                 continue
             }
+
             err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
             if err != nil {
                 return err

View File

@ -1,11 +1,10 @@
 package transactionrelay_test

 import (
-    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
     "testing"

     "github.com/kaspanet/kaspad/app/appmessage"
+    "github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
     "github.com/kaspanet/kaspad/domain"
     "github.com/kaspanet/kaspad/domain/consensus"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@ -32,7 +31,7 @@ func TestHandleRequestedTransactionsNotFound(t *testing.T) {
     }
     defer teardown(false)

-    sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
+    sharedRequestedTransactions := transactionrelay.NewSharedRequestedTransactions()
     adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
     if err != nil {
         t.Fatalf("Failed to create a NetAdapter: %v", err)

View File

@ -1,4 +1,4 @@
-package flowcontext
+package transactionrelay

 import (
     "sync"
@ -13,15 +13,13 @@ type SharedRequestedTransactions struct {
     sync.Mutex
 }

-// Remove removes a transaction from the set.
-func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) {
+func (s *SharedRequestedTransactions) remove(txID *externalapi.DomainTransactionID) {
     s.Lock()
     defer s.Unlock()
     delete(s.transactions, *txID)
 }

-// RemoveMany removes a set of transactions from the set.
-func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) {
+func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
     s.Lock()
     defer s.Unlock()
     for _, txID := range txIDs {
@ -29,8 +27,7 @@ func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTran
     }
 }

-// AddIfNotExists adds a transaction to the set if it doesn't exist yet.
-func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
+func (s *SharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
     s.Lock()
     defer s.Unlock()
     _, ok := s.transactions[*txID]

View File

@ -1,16 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be equal at both the syncer and the syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages`, to avoid confusion and human error.
// Nonetheless, we must enforce that it does not exceed `router.DefaultMaxMessages`
if ibdBatchSize >= router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}
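
The deleted test enforced the batch-size bound at run time; the same guard can also be expressed at compile time, since unsigned underflow in a constant expression fails the build. A sketch with illustrative constants, not kaspad's real values:

package main

// If ibdBatchSize ever reaches defaultMaxMessages, the guard constant below
// becomes negative and the build fails with an overflow error.
const (
    ibdBatchSize       = 99
    defaultMaxMessages = 100
)

const _ = uint(defaultMaxMessages - ibdBatchSize - 1) // compile-time guard

func main() {}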

View File

@ -1,85 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
Domain() domain.Domain
}
type handleRequestIBDChainBlockLocatorFlow struct {
RequestIBDChainBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestIBDChainBlockLocatorFlow{
RequestIBDChainBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
for {
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
var locator externalapi.BlockLocator
if highHash == nil || lowHash == nil {
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
} else {
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
// The chain has been modified, signal it by sending an empty locator
locator, err = externalapi.BlockLocator{}, nil
}
}
if err != nil {
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between %s and %s", lowHash, highHash)
}
err = flow.sendIBDChainBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}
func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
if err != nil {
return err
}
return nil
}
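
For context, block locators like the ones this handler serves are exponentially spaced samples of the selected chain, so their length is logarithmic in chain length. A toy builder over integer indexes standing in for block hashes:

package main

import "fmt"

// locator samples exponentially spaced points between a chain tip and a low
// bound, the classic block-locator shape.
func locator(high, low int) []int {
    out := []int{}
    step := 1
    for cur := high; cur > low; cur -= step {
        out = append(out, cur)
        step *= 2
    }
    return append(out, low)
}

func main() {
    // ~log2(range) entries let the requester bisect the chain cheaply.
    fmt.Println(locator(100, 0)) // [100 98 94 86 70 38 0]
}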

View File

@ -1,162 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
Config() *config.Config
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
for _, blockHash := range pointAndItsAnticone {
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
if err != nil {
return err
}
trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
for i, daaBlockHash := range blockDAAWindowHashes {
index, exists := daaWindowHashesToIndex[*daaBlockHash]
if !exists {
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
if err != nil {
return err
}
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
index = len(daaWindowBlocks) - 1
daaWindowHashesToIndex[*daaBlockHash] = index
}
trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
}
ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
if err != nil {
return err
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
if !exists {
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
if err != nil {
return err
}
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
Hash: ghostdagDataBlockHash,
GHOSTDAGData: data,
})
index = len(ghostdagData) - 1
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
}
}
err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
if err != nil {
return err
}
for i, blockHash := range pointAndItsAnticone {
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}
if !found {
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
if (i+1)%ibdBatchSize == 0 {
// No timeout here, as we don't care if the syncee takes its time computing,
// since it only blocks this dedicated flow
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
}
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
}
}
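
The isBusy compare-and-swap above makes concurrent anticone requests fail fast rather than queue behind an expensive computation. A minimal sketch of the guard:

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var isBusy uint32 // 1 while a request is being served

// serveExpensiveRequest admits at most one caller at a time; concurrent
// callers get an immediate error instead of queueing.
func serveExpensiveRequest() error {
    if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
        return errors.New("node is busy with another request")
    }
    defer atomic.StoreUint32(&isBusy, 0)
    // ...build and send the expensive response here...
    return nil
}

func main() {
    fmt.Println(serveExpensiveRequest()) // <nil>
    atomic.StoreUint32(&isBusy, 1)       // simulate a request still in flight
    fmt.Println(serveExpensiveRequest()) // node is busy with another request
}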

View File

@ -1,40 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
Domain() domain.Domain
}
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point proof from %s", peer)
pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
if err != nil {
return err
}
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
err = outgoingRoute.Enqueue(pruningPointProofMessage)
if err != nil {
return err
}
log.Debugf("Sent pruning point proof to %s", peer)
}
}

View File

@ -1,95 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sort"
)
// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestAnticoneContext interface {
Domain() domain.Domain
Config() *config.Config
}
type handleRequestAnticoneFlow struct {
RequestAnticoneContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestAnticoneFlow{
RequestAnticoneContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestAnticoneFlow) start() error {
for {
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
// we relay blocks only if they enter virtual's mergeset. We add a factor of 2 for possible sync gaps.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone")
}
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
// We sort the headers in bottom-up topological order before sending
sort.Slice(blockHeaders, func(i, j int) bool {
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
})
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
contextHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}
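
The sort step above orders headers by ascending blue work, which serves as a bottom-up topological order since a block accumulates strictly more blue work than the blocks in its past. A runnable sketch using a stand-in header type:

package main

import (
    "fmt"
    "math/big"
    "sort"
)

// header is a minimal stand-in for MsgBlockHeader, keeping only the field
// the sort needs.
type header struct {
    name     string
    blueWork *big.Int
}

func main() {
    headers := []header{
        {"tip", big.NewInt(300)},
        {"low", big.NewInt(100)},
        {"middle", big.NewInt(200)},
    }
    // Ascending blue work, exactly as in the handler above.
    sort.Slice(headers, func(i, j int) bool {
        return headers[i].blueWork.Cmp(headers[j].blueWork) < 0
    })
    fmt.Println(headers[0].name, headers[1].name, headers[2].name) // low middle tip
}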

View File

@ -1,751 +0,0 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
"time"
)
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock) error
OnNewBlockTemplate() error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}
type handleIBDFlow struct {
IBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
IBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
// Wait for IBD requests triggered by other flows
block, ok := <-flow.peer.IBDRequestChannel()
if !ok {
return nil
}
err := flow.runIBDIfNotRunning(block)
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
isFinishedSuccessfully := false
var err error
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully, err)
}()
relayBlockHash := consensushashing.BlockHash(block)
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
log.Infof("Syncing blocks up to %s", relayBlockHash)
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
if err != nil {
return err
}
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
block, highestKnownSyncerChainHash)
if err != nil {
return err
}
if !shouldSync {
return nil
}
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}
} else {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", relayBlockHash)
return nil
}
}
err = flow.syncPruningPointFutureHeaders(
flow.Domain().Consensus(),
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}
}
// We start by syncing missing bodies over the syncer selected chain
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
if err != nil {
return err
}
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
// Relay block might be in the anticone of syncer selected tip, thus
// check its chain for missing bodies as well.
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
// is in syncer virtual mergeset which has bounded size.
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
err = flow.syncMissingBlockBodies(relayBlockHash)
if err != nil {
return err
}
}
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
isFinishedSuccessfully = true
return nil
}
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
/*
Algorithm:
Request full selected chain block locator from syncer
Find the highest block which we know
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
*/
// Empty hashes indicate that the full chain is queried
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
syncerHeaderSelectedTipHash := locatorHashes[0]
var highestKnownSyncerChainHash *externalapi.DomainHash
chainNegotiationRestartCounter := 0
chainNegotiationZoomCounts := 0
initialLocatorLen := len(locatorHashes)
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return nil, nil, err
}
for {
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
for _, syncerChainHash := range locatorHashes {
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
if err != nil {
return nil, nil, err
}
if info.Exists {
if info.BlockStatus == externalapi.StatusInvalid {
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
}
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
if err != nil {
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
}
// We're only interested in syncer chain blocks that have our pruning
// point in their selected chain. Otherwise, it means one of the following:
// 1) We will not switch the virtual selected chain to the syncer's chain since it will violate finality
// (hence we can ignore it unless merged by others).
// 2) syncerChainHash is actually in the past of our pruning point so there's no
// point in syncing from it.
if err == nil && isPruningPointOnSyncerChain {
currentHighestKnownSyncerChainHash = syncerChainHash
break
}
}
lowestUnknownSyncerChainHash = syncerChainHash
}
// No unknown blocks, break. Note this can only happen in the first iteration
if lowestUnknownSyncerChainHash == nil {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// No shared block, break
if currentHighestKnownSyncerChainHash == nil {
highestKnownSyncerChainHash = nil
break
}
// No point in zooming further
if len(locatorHashes) == 1 {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// Zoom in
locatorHashes, err = flow.getSyncerChainBlockLocator(
lowestUnknownSyncerChainHash,
currentHighestKnownSyncerChainHash, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) > 0 {
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
"hashes to match the locator bounds")
}
chainNegotiationZoomCounts++
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
if len(locatorHashes) == 2 {
// We found our search target
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
if chainNegotiationZoomCounts > initialLocatorLen*2 {
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
// expected to decrease in size at least every two iterations
return nil, nil, protocolerrors.Errorf(true,
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
chainNegotiationZoomCounts, initialLocatorLen)
}
} else { // Empty locator signals a restart due to chain changes
chainNegotiationZoomCounts = 0
chainNegotiationRestartCounter++
if chainNegotiationRestartCounter > 32 {
return nil, nil, protocolerrors.Errorf(false,
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
}
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
// An empty locator signals that the syncer chain was modified and no longer contains one of
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
initialLocatorLen = len(locatorHashes)
// Reset syncer's header selected tip
syncerHeaderSelectedTipHash = locatorHashes[0]
}
}
log.Infof("Found highest known syncer chain block %s from peer %s",
highestKnownSyncerChainHash, flow.peer)
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
successString := "successfully"
if !isFinishedSuccessfully {
if err != nil {
successString = fmt.Sprintf("(interrupted: %s)", err)
} else {
successString = fmt.Sprintf("(interrupted)")
}
}
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
}
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgIBDChainBlockLocator:
if len(message.BlockLocatorHashes) > 64 {
return nil, protocolerrors.Errorf(true,
"Got block locator of size %d>64 while expecting locator to have size "+
"which is logarithmic in DAG size (which should never exceed 2^64)",
len(message.BlockLocatorHashes))
}
return message.BlockLocatorHashes, nil
default:
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
}
}
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScoreHint uint64) error {
log.Infof("Downloading headers from %s", flow.peer)
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
// No need to get syncer selected tip headers, so sync relay past and return
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
}
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
if err != nil {
return err
}
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")
// Keep a short queue of BlockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
errChan <- err
return
}
if doneIBD {
close(blockHeadersMessageChan)
return
}
if len(blockHeadersMessage.BlockHeaders) == 0 {
// The syncer should have sent a done message if the search completed, and not an empty list
errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
return
}
blockHeadersMessageChan <- blockHeadersMessage
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
if err != nil {
errChan <- err
return
}
}
})
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
if !ok {
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
}
for _, header := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
case err := <-errChan:
return err
}
}
}
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
// Finished downloading syncer selected tip blocks,
// check if we already have the triggering relayBlockHash
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
// Send a special header request for the selected tip anticone. This is expected to
// be a small set, as it is bounded by the size of virtual's mergeset.
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
if err != nil {
return err
}
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
if err != nil {
return err
}
if anticoneDone {
return protocolerrors.Errorf(true,
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
relayBlockHash, syncerHeaderSelectedTipHash)
}
_, anticoneDone, err = flow.receiveHeaders()
if err != nil {
return err
}
if !anticoneDone {
return protocolerrors.Errorf(true,
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
relayBlockHash, syncerHeaderSelectedTipHash)
}
for _, header := range anticoneHeadersMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
if err != nil {
return err
}
}
}
// If the relayBlockHash has still not been received, the peer is misbehaving
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
}
return nil
}
func (flow *handleIBDFlow) sendRequestAnticone(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
}
func (flow *handleIBDFlow) sendRequestHeaders(
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.BlockHeadersMessage:
return message, false, nil
case *appmessage.MsgDoneHeaders:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdBlockHeaders,
appmessage.CmdDoneHeaders,
message.Command())
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
Transactions: nil,
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
if err != nil {
return err
}
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
if err != nil {
return err
}
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return err
}
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
if err != nil {
return err
}
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
"tip is smaller than the current selected tip")
}
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
return protocolerrors.Errorf(false, "difference between the timestamps of "+
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
}
return nil
}
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
if err != nil {
return false, err
}
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
)
}
}
}
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
}
if len(hashes) == 0 {
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
// In these cases - GetMissingBlockBodyHashes would return an empty array.
log.Debugf("No missing block body hashes found.")
return nil
}
lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
if err != nil {
return err
}
highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore()
// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
if err != nil {
return err
}
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
hashesToRequest = hashes[offset : offset+ibdBatchSize]
} else {
hashesToRequest = hashes[offset:]
}
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
if err != nil {
return err
}
for _, expectedHash := range hashesToRequest {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
blockHash := consensushashing.BlockHash(block)
if !expectedHash.Equal(blockHash) {
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}
err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
continue
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block)
if err != nil {
return err
}
highestProcessedDAAScore = block.Header.DAAScore()
}
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
}
// We need to resolve virtual only if it wasn't updated while syncing block bodies
if !updateVirtual {
err := flow.resolveVirtual(highestProcessedDAAScore)
if err != nil {
return err
}
}
return flow.OnNewBlockTemplate()
}
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
var percents int
if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart { // compare instead of subtracting: these are uint64s and the difference would underflow
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
if percents < 0 {
percents = 0
} else if percents > 100 {
percents = 100
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
})
if err != nil {
return err
}
log.Infof("Resolved virtual")
return nil
}
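
The callback above clamps its estimate because estimatedVirtualDAAScoreTarget is only a hint and may be overtaken or never reached. The same computation as a standalone, runnable helper (a sketch; the function name is hypothetical):

package main

import "fmt"

func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100 // no estimated range left; mirrors the behavior above
	}
	if current < start {
		return 0 // guard against uint64 underflow
	}
	percents := int(float64(current-start) / float64(target-start) * 100)
	if percents > 100 {
		percents = 100
	}
	return percents
}

func main() {
	fmt.Println(progressPercent(1_000, 1_500, 2_000)) // 50
	fmt.Println(progressPercent(1_000, 2_500, 2_000)) // clamped to 100
}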

View File

@ -1,45 +0,0 @@
package blockrelay
type ibdProgressReporter struct {
lowDAAScore uint64
highDAAScore uint64
objectName string
totalDAAScoreDifference uint64
lastReportedProgressPercent int
processed int
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero diff (the scores are unsigned, so subtraction would otherwise underflow)
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
objectName: objectName,
totalDAAScoreDifference: highDAAScore - lowDAAScore,
lastReportedProgressPercent: 0,
processed: 0,
}
}
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid uint64 underflow when the processed score is still below lowDAAScore
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
ipr.lastReportedProgressPercent = progressPercent
}
}
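
Since the initial highDAAScore may be an underestimate, reportProgress bumps it whenever a processed score overtakes it, and the +1 keeps the displayed figure at 99% until the loop actually finishes. A compact, runnable rendition of that adjustment (types simplified for illustration):

package main

import "fmt"

type demoReporter struct {
	low, high, total uint64
	lastPercent      int
	processed        int
}

func (r *demoReporter) report(delta int, highest uint64) {
	r.processed += delta
	if highest > r.high {
		r.high = highest + 1 // +1 keeps the report at 99% until done
		r.total = r.high - r.low
	}
	var relative uint64
	if highest > r.low {
		relative = highest - r.low
	}
	percent := int(float64(relative) / float64(r.total) * 100)
	if percent > r.lastPercent { // only log when the percentage advances
		fmt.Printf("IBD: processed %d blocks (%d%%)\n", r.processed, percent)
		r.lastPercent = percent
	}
}

func main() {
	r := &demoReporter{low: 0, high: 100, total: 100}
	for _, score := range []uint64{40, 80, 150} { // 150 overtakes the hint of 100
		r.report(10, score)
	}
}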

View File

@ -1,444 +0,0 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
"time"
)
func (flow *handleIBDFlow) ibdWithHeadersProof(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensusWithoutGenesis()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
}
return err
}
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return err
}
return nil
}
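
ibdWithHeadersProof follows a stage-then-commit discipline: all proof-based work happens in a staging consensus that is deleted on a recoverable failure and committed atomically on success. A self-contained restatement of that control flow, with hypothetical stand-ins for the staging-consensus API:

package main

import (
	"errors"
	"fmt"
)

// runStaged is a hypothetical helper, not part of the Domain API.
func runStaged(work func() error, commit, rollback func() error) error {
	if err := work(); err != nil {
		if rbErr := rollback(); rbErr != nil {
			return rbErr // a rollback failure takes precedence, as above
		}
		return err
	}
	return commit() // atomically replace the active state on success
}

func main() {
	err := runStaged(
		func() error { return errors.New("peer disconnected mid-download") },
		func() error { fmt.Println("committing staging consensus"); return nil },
		func() error { fmt.Println("deleting staging consensus"); return nil },
	)
	fmt.Println("IBD result:", err)
}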
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
relayBlock *externalapi.DomainBlock,
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
if highestKnownSyncerChainHash != nil {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
if err != nil {
return false, false, err
}
highestSharedBlockFound = blockInfo.HasBody()
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return false, false, err
}
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
pruningPoint, highestKnownSyncerChainHash)
if err != nil {
return false, false, err
}
}
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
// we might have here info which is relevant to finality conflict decisions. This should be taken into
// account when we improve this aspect.
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
if err != nil {
return false, false, err
}
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
return true, true, nil
}
if highestKnownSyncerChainHash == nil {
log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
return false, false, nil
}
return false, true, nil
}
return false, true, nil
}
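
Condensed, the decision above has four outcomes. A pure-function restatement (a hypothetical helper, useful for reading the cases at a glance):

package main

import "fmt"

// decide is a hypothetical condensation of shouldSyncAndShouldDownloadHeadersProof.
func decide(sharedBlockFound, pruningPointInSharedChain, enoughBlueWork, anyKnownSyncerHash bool) (shouldDownloadProof, shouldSync bool) {
	if sharedBlockFound && pruningPointInSharedChain {
		return false, true // regular IBD; no headers proof needed
	}
	if enoughBlueWork {
		return true, true // syncer is far ahead of us: IBD with headers proof
	}
	if !anyKnownSyncerHash {
		return false, false // syncing from this peer would cause a finality conflict
	}
	return false, true
}

func main() {
	fmt.Println(decide(true, true, false, true))    // false true
	fmt.Println(decide(false, false, true, true))   // true true
	fmt.Println(decide(false, false, false, false)) // false false
}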
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
if err != nil {
return false, err
}
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil
}
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
}
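
The guard above requires the relay block to be at least a pruning depth of blue score beyond the virtual selected tip, and to carry strictly more blue work. A runnable sketch with made-up numbers (the pruning depth and scores are illustrative only):

package main

import (
	"fmt"
	"math/big"
)

func qualifiesForHeadersProof(relayBlueScore, tipBlueScore, pruningDepth uint64,
	relayBlueWork, tipBlueWork *big.Int) bool {
	if relayBlueScore < tipBlueScore+pruningDepth {
		return false // not deep enough past our selected tip
	}
	return relayBlueWork.Cmp(tipBlueWork) > 0 // must also carry more accumulated work
}

func main() {
	fmt.Println(qualifiesForHeadersProof(200_000, 10_000, 185_000,
		big.NewInt(900), big.NewInt(800))) // true: 200000 >= 195000 and 900 > 800
}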
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
log.Infof("Downloading the pruning point proof from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
if err != nil {
return nil, err
}
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
}
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
}
return nil, err
}
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
if err != nil {
return nil, err
}
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
highBlockDAAScore uint64) error {
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
if err != nil {
return err
}
err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
if err != nil {
return err
}
// TODO: Remove this condition once there's more proper way to check finality violation
// in the headers proof.
if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
if err != nil {
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
if !relayBlockInfo.Exists {
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
}
err = flow.validatePruningPointFutureHeaderTimestamps()
if err != nil {
return err
}
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
return nil
}
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
if err != nil {
return err
}
err = flow.validateAndInsertPruningPoints(proofPruningPoint)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
}
pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
}
if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
if err != nil {
return err
}
i := 0
for ; ; i++ {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
break
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
if err != nil {
return err
}
// Use i+2 rather than i+1: the pruning point itself was downloaded before
// the loop, so after loop index i a total of i+2 blocks have been received.
if (i+2)%ibdBatchSize == 0 {
log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
if err != nil {
return err
}
}
}
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
return nil
}
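
The received-count arithmetic in that loop can be checked mechanically: the pruning point arrives before the loop, so after index i a total of i+2 blocks have been received. A runnable check with an illustrative batch size:

package main

import "fmt"

func main() {
	const batchSize = 4 // illustrative; the real value is ibdBatchSize
	for i := 0; i < 9; i++ {
		received := i + 2 // 1 pruning point + (i+1) anticone blocks
		if received%batchSize == 0 {
			fmt.Printf("after index %d, %d blocks received: request next batch\n", i, received)
		}
	}
}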
func (flow *handleIBDFlow) processBlockWithTrustedData(
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
blockWithTrustedData := &externalapi.BlockWithTrustedData{
Block: appmessage.MsgBlockToDomainBlock(block.Block),
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
}
for _, index := range block.DAAWindowIndices {
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
}
for _, index := range block.GHOSTDAGDataIndices {
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
}
return err
}
return nil
}
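
Each trusted-data block carries only indices into a shared table sent once in MsgTrustedData, rather than repeating the large DAA-window and GHOSTDAG entries per block. A simplified sketch of that expansion (the types here are illustrative stand-ins, not the appmessage ones):

package main

import "fmt"

type sharedTrustedData struct {
	daaWindow []string // stand-in for the shared DAA-window headers
}

type compressedBlock struct {
	daaWindowIndices []uint64 // indices into the shared table
}

func expand(block compressedBlock, shared sharedTrustedData) []string {
	window := make([]string, 0, len(block.daaWindowIndices))
	for _, index := range block.daaWindowIndices {
		window = append(window, shared.daaWindow[index])
	}
	return window
}

func main() {
	shared := sharedTrustedData{daaWindow: []string{"hdr0", "hdr1", "hdr2", "hdr3"}}
	fmt.Println(expand(compressedBlock{daaWindowIndices: []uint64{0, 2, 3}}, shared))
}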
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch downCastedMessage := message.(type) {
case *appmessage.MsgBlockWithTrustedDataV4:
return downCastedMessage, false, nil
case *appmessage.MsgDoneBlocksWithTrustedData:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
(&appmessage.MsgBlockWithTrustedData{}).Command(),
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
downCastedMessage.Command())
}
}
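
receiveBlockWithTrustedData models a stream with an explicit terminator: a type switch maps each payload either to a value or to a done signal. The same (message, done, err) pattern in miniature:

package main

import "fmt"

type blockItem struct{ name string }
type doneMarker struct{}

func receiveNext(stream []interface{}, i int) (*blockItem, bool, error) {
	switch message := stream[i].(type) {
	case *blockItem:
		return message, false, nil
	case *doneMarker:
		return nil, true, nil
	default:
		return nil, false, fmt.Errorf("unexpected message type %T", message)
	}
}

func main() {
	stream := []interface{}{&blockItem{"a"}, &blockItem{"b"}, &doneMarker{}}
	for i := range stream {
		item, done, err := receiveNext(stream, i)
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		if done {
			fmt.Println("stream done")
			return
		}
		fmt.Println("got", item.name)
	}
}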
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
}
return msgPruningPoints, nil
}
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
if currentPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
}
pruningPoints, err := flow.receivePruningPoints()
if err != nil {
return err
}
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
for i, header := range pruningPoints.Headers {
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
}
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
if err != nil {
return err
}
if arePruningPointsViolatingFinality {
// TODO: Find a better way to deal with finality conflicts.
return protocolerrors.Errorf(false, "pruning points are violating finality")
}
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
if !lastPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
"point in the list")
}
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
pruningPoint *externalapi.DomainHash) (bool, error) {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
if err != nil {
return false, err
}
if !isValid {
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
}
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
return false, err
}
if !isSuccessful {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
return true, nil
}
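
fetchMissingUTXOSet clears the imported pruning point data in a defer, so the staging consensus is left clean on every exit path, success or failure. A minimal illustration of that pattern (the helper below is hypothetical):

package main

import "fmt"

func fetchWithCleanup(succeed bool) (bool, error) {
	defer func() {
		// runs on every return path, mirroring ClearImportedPruningPointData
		fmt.Println("cleared imported pruning point data")
	}()
	if !succeed {
		return false, fmt.Errorf("UTXO set chunk missing")
	}
	return true, nil
}

func main() {
	fmt.Println(fetchWithCleanup(false))
	fmt.Println(fetchWithCleanup(true))
}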

View File

@ -1,209 +0,0 @@
package v5
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type protocolManager interface {
RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
Context() *flowcontext.FlowContext
}
// Register registers all of the protocol flows with the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
flows = registerAddressFlows(m, router, isStopping, errChan)
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
return flows
}
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
}),
m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
appmessage.CmdPruningPointProof,
appmessage.CmdTrustedData,
appmessage.CmdIBDChainBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBD(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestAnticone", router,
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandlePruningPointProofRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("HandleRejects", router,
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
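
Each register helper above calls router.OutgoingRoute() once and closes over the result in every handler, so all flows on a router share a single outgoing route. The closure capture in isolation (names are illustrative):

package main

import "fmt"

type route struct{ name string }

// registerFlows mimics the wiring pattern: every handler closes over the
// same shared outgoing route that was fetched once up front.
func registerFlows(outgoing *route) []func() {
	handlers := make([]func(), 0, 2)
	for _, flowName := range []string{"ReceivePings", "SendPings"} {
		flowName := flowName // per-iteration copy (pre-Go 1.22 idiom)
		handlers = append(handlers, func() {
			fmt.Printf("%s -> %s\n", flowName, outgoing.name)
		})
	}
	return handlers
}

func main() {
	for _, handler := range registerFlows(&route{name: "outgoingRoute"}) {
		handler()
	}
}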

View File

@ -5,8 +5,6 @@ import (
 "sync"
 "sync/atomic"
 
-"github.com/kaspanet/kaspad/app/protocol/common"
-
 "github.com/pkg/errors"
 
 "github.com/kaspanet/kaspad/domain"
@ -73,16 +71,11 @@ func (m *Manager) AddBlock(block *externalapi.DomainBlock) error {
 return m.context.AddBlock(block)
 }
 
-// Context returns the manager's flow context
-func (m *Manager) Context() *flowcontext.FlowContext {
-return m.context
-}
-
-func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
+func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
 flowsWaitGroup.Add(len(flows))
 for _, flow := range flows {
-executeFunc := flow.ExecuteFunc // extract to new variable so that it's not overwritten
-spawn(fmt.Sprintf("flow-%s", flow.Name), func() {
+executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
+spawn(fmt.Sprintf("flow-%s", flow.name), func() {
 executeFunc(peer)
 flowsWaitGroup.Done()
 })
@ -91,9 +84,9 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
 return <-errChan
 }
 
-// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
-func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
-m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
+// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
+func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
+m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
 }
 
 // SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
@ -106,6 +99,12 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
 m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
 }
 
+// ShouldMine returns whether it's ok to use block template from this node
+// for mining purposes.
+func (m *Manager) ShouldMine() (bool, error) {
+return m.context.ShouldMine()
+}
+
 // IsIBDRunning returns true if IBD is currently marked as running
 func (m *Manager) IsIBDRunning() bool {
 return m.context.IsIBDRunning()

Some files were not shown because too many files have changed in this diff.