Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-09-13 13:00:10 +00:00)
Compare commits
158 Commits
SHA1
4bb5bf25d3, 25c2dd8670, c93100ccd0, 03cc7dfc19, ed745a9acb, c23c1d141c, 352d261fd6, 43b9523919,
6085d1fc84, 1e9ddc42d0, 48a142e12f, 86b89065cf, f41dc7fa0b, 6b38bf7069, d2453f8e7b, 629faa8436,
91e6c6b74b, 0819244ba1, a0149cd8d0, 5a3b8a0066, 8e71f79f98, 346341a709, 8c881aea39, 40ec440dcf,
88bdcb43bc, 9d1e44673f, 387fade044, c417c8b525, bd1420220a, 5640ec4020, 1c0887ca60, 7be3f41aa7,
26c4c73624, 880d917e58, 3c53c6d8cd, 3c4b973090, 8aee8f81c5, ec3441e63f, e3ba1ca07e, 27fdbd9c88,
377d9aaaeb, beee947dda, d4a27bf1c1, eec6eb9669, d5c10832c2, 9fbfba17b6, 09d698dd0e, ec51c6926a,
7d44275eb1, a3387a56b3, c2ae03fc89, 6c774c966b, 2d54c9693b, d8350d62b0, 26c7db251f, 4d435f2b3a,
067688f549, 3a3fa0d3f0, cf4073b773, 6a5e7c9e3f, 7e9b5b9010, 953838e0d8, a1dcb34c29, 23764e1b0b,
0838cc8e32, 9f51330f38, f6d46fd23f, 2a7e03e232, 3286a7d010, aabbc741d7, 20b7ab89f9, 10f1e7e3f4,
d941c73701, 3f80638c86, 266ec6c270, 9ee409afaa, 715cb3b1ac, eb693c4a86, 7a61c637b0, c7bd84ef9d,
b26b9f6c4b, 1c9bb54cc2, b9093d59eb, 18d000f625, c5aade7e7f, d4b741fd7c, 74a4f927e9, 847aafc91f,
c87e541570, 2ea1c4f922, 5e9c28b77b, d957a6d93a, b2648aa5bd, 3908f274ae, fa7ea121ff, 24848da895,
b200b77541, d50ad0667c, 5cea285960, 7eb5085f6b, 491e3569d2, 440aea19b0, 968d47c3e6, 052193865e,
85febcb551, a4d9fa10bf, cd5fd86ad3, b84d6fed2c, 24c94b38be, 4dd7113dc5, 48c7fa0104, 4d0cf2169a,
5f7cc079e9, 016ddfdfce, 5d24e2afbc, 8735da045f, c839337425, 7390651072, 52fbeedf20, 1660cf0cf1,
2b5202be7a, 9ffbb15160, 540b0d3a22, 8d5faee53a, 6e2fd0633b, beb038c815, 35a959b56f, 57c6118be8,
723aebbec9, 2b395e34b1, ada559f007, 357e8ce73c, 6725902663, 99bb21c512, a4669f3fb5, e8f40bdff9,
68a407ea37, 80879cabe1, 71afc62298, ca5c8549b9, ab73def07a, 3f840233d8, 90d9edb8e5, b9b360bce4,
27654961f9, d45af760d8, 95fa045297, cb65dae63d, 21b82d7efc, 63c6d7443b, 753f4a2ec1, ed667f7e54,
c4a034eb43, 2eca0f0b5f, 58d627e05a, 639183ba0e, 9fa08442cf, 0dd50394ec
.github/workflows/deploy.yaml (vendored): 23 changed lines

@@ -1,7 +1,7 @@
 name: Build and upload assets
 on:
   release:
-    types: [ published ]
+    types: [published]
 
 jobs:
   build:
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
      matrix:
-        os: [ ubuntu-latest, windows-latest, macos-latest ]
+        os: [ubuntu-latest, windows-latest, macos-latest]
    name: Building, ${{ matrix.os }}
    steps:
      - name: Fix CRLF on Windows
@@ -17,18 +17,12 @@ jobs:
        run: git config --global core.autocrlf false
 
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      # Increase the pagefile size on Windows to aviod running out of memory
-      - name: Increase pagefile size on Windows
-        if: runner.os == 'Windows'
-        run: powershell -command .github\workflows\SetPageFileSize.ps1
-
+        uses: actions/checkout@v4
 
      - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
-          go-version: 1.16
+          go-version: 1.21
 
      - name: Build on Linux
        if: runner.os == 'Linux'
@@ -36,7 +30,7 @@ jobs:
        # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
        # `-s -w` strips the binary to produce smaller size binaries
        run: |
-          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
+          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
          zip -r "${archive}" ./bin/*
@@ -47,7 +41,7 @@ jobs:
        if: runner.os == 'Windows'
        shell: bash
        run: |
-          go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
+          go build -v -ldflags="-s -w" -o bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
          powershell "Compress-Archive bin/* \"${archive}\""
@@ -57,14 +51,13 @@ jobs:
      - name: Build on MacOS
        if: runner.os == 'macOS'
        run: |
-          go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
+          go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV
 
-
      - name: Upload release asset
        uses: actions/upload-release-asset@v1
        env:
.github/workflows/race.yaml (vendored): 8 changed lines

@@ -11,18 +11,18 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-        branch: [ master, latest ]
+        branch: [master, latest]
    name: Race detection on ${{ matrix.branch }}
    steps:
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
      - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
-          go-version: 1.16
+          go-version: 1.23
 
      - name: Set scheduled branch name
        shell: bash
.github/workflows/tests.yaml (vendored): 28 changed lines

@@ -8,22 +8,20 @@ on:
    types: [opened, synchronize, edited]
 
 jobs:
-
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
-        os: [ ubuntu-latest, macos-latest ]
+        os: [ubuntu-latest, macos-latest]
    name: Tests, ${{ matrix.os }}
    steps:
-
      - name: Fix CRLF on Windows
        if: runner.os == 'Windows'
        run: git config --global core.autocrlf false
 
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
      # Increase the pagefile size on Windows to aviod running out of memory
      - name: Increase pagefile size on Windows
@@ -31,14 +29,13 @@ jobs:
        run: powershell -command .github\workflows\SetPageFileSize.ps1
 
      - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
-          go-version: 1.16
-
+          go-version: 1.23
 
      # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
      - name: Go Cache
-        uses: actions/cache@v2
+        uses: actions/cache@v4
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -49,19 +46,17 @@ jobs:
        shell: bash
        run: ./build_and_test.sh -v
 
-
  stability-test-fast:
    runs-on: ubuntu-latest
    name: Fast stability tests, ${{ github.head_ref }}
    steps:
-
      - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
-          go-version: 1.16
+          go-version: 1.23
 
      - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
@@ -75,18 +70,17 @@ jobs:
        working-directory: stability-tests
        run: ./install_and_test.sh
 
-
  coverage:
    runs-on: ubuntu-latest
    name: Produce code coverage
    steps:
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
      - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
-          go-version: 1.16
+          go-version: 1.23
 
      - name: Delete the stability tests from coverage
        run: rm -r stability-tests
.gitignore (vendored): 1 changed line

@@ -53,6 +53,7 @@ _testmain.go
 debug
 debug.test
 __debug_bin
+*__debug_*
 
 # CI
 version.txt
CODE_OF_CONDUCT.md (new file): 43 lines

@@ -0,0 +1,43 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
README.md: 16 changed lines

@@ -1,13 +1,15 @@
+# DEPRECATED
+
-Kaspad
-====
+The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
+
+PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
+
+# Kaspad
 
 [](https://choosealicense.com/licenses/isc/)
 [](http://godoc.org/github.com/kaspanet/kaspad)
 
-Kaspad is the reference full node Kaspa implementation written in Go (golang).
-
-This project is currently under active development and is in Beta state.
+Kaspad was the reference full node Kaspa implementation written in Go (golang).
 
 ## What is kaspa
 
@@ -15,7 +17,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
 
 ## Requirements
 
-Go 1.16 or later.
+Go 1.23 or later.
 
 ## Installation
 
@@ -42,7 +44,6 @@ $ go install . ./cmd/...
 not already add the bin directory to your system path during Go installation,
 you are encouraged to do so now.
 
-
 ## Getting Started
 
 Kaspad has several configuration options available to tweak how it runs, but all
@@ -53,6 +54,7 @@ $ kaspad
 ```
 
 ## Discord
+
 Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
 
 ## Issue Tracker
@@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
 with the specifics of message handling such as what to do when a message is
 received. This provides the caller with a high level of flexibility.
 
-Kaspa Message Overview
+# Kaspa Message Overview
 
 The kaspa protocol consists of exchanging messages between peers. Each
 message is preceded by a header which identifies information about it such as
@@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
 appmessage using kaspa encoding are handled so the caller doesn't have to concern
 themselves with the specifics.
 
-Message Interaction
+# Message Interaction
 
 The following provides a quick summary of how the kaspa messages are intended
 to interact with one another. As stated above, these interactions are not
@@ -45,13 +45,13 @@ interactions in no particular order.
 	notfound message (MsgNotFound)
 	ping message (MsgPing) pong message (MsgPong)
 
-Common Parameters
+# Common Parameters
 
 There are several common parameters that arise when using this package to read
 and write kaspa messages. The following sections provide a quick overview of
 these parameters so the next sections can build on them.
 
-Protocol Version
+# Protocol Version
 
 The protocol version should be negotiated with the remote peer at a higher
 level than this package via the version (MsgVersion) message exchange, however,
@@ -60,18 +60,18 @@ latest protocol version this package supports and is typically the value to use
 for all outbound connections before a potentially lower protocol version is
 negotiated.
 
-Kaspa Network
+# Kaspa Network
 
 The kaspa network is a magic number which is used to identify the start of a
 message and which kaspa network the message applies to. This package provides
 the following constants:
 
-	appmessage.Mainnet
-	appmessage.Testnet (Test network)
-	appmessage.Simnet (Simulation test network)
-	appmessage.Devnet (Development network)
+	appmessage.Mainnet
+	appmessage.Testnet (Test network)
+	appmessage.Simnet (Simulation test network)
+	appmessage.Devnet (Development network)
 
-Determining Message Type
+# Determining Message Type
 
 As discussed in the kaspa message overview section, this package reads
 and writes kaspa messages using a generic interface named Message. In
@@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
 		fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
 	}
 
-Reading Messages
+# Reading Messages
 
 In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
 function. It accepts any io.Reader, but typically this will be a net.Conn to
@@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
 		// Log and handle the error
 	}
 
-Writing Messages
+# Writing Messages
 
 In order to marshall kaspa messages to the appmessage, use the WriteMessage
 function. It accepts any io.Writer, but typically this will be a net.Conn to
@@ -122,7 +122,7 @@ from a remote peer is:
 		// Log and handle the error
 	}
 
-Errors
+# Errors
 
 Errors returned by this package are either the raw errors provided by underlying
 calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
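The type-switch fragment quoted in the documentation above is only an excerpt. A fuller, purely illustrative version is sketched below; it is not code taken from the repository, and it assumes a variable msg holding an appmessage.Message that was already read from a peer (for example via the ReadMessage helper the documentation describes), with the fmt package imported:

    // msg is an appmessage.Message previously read from a peer connection.
    switch msg := msg.(type) {
    case *appmessage.MsgVersion:
        // Version negotiation message; protocol version handling would go here.
    case *appmessage.MsgBlock:
        // The concrete type gives access to the block header fields.
        fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
    default:
        // Unknown or unhandled message type.
    }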
@@ -2,9 +2,10 @@ package appmessage
 
 import (
 	"encoding/hex"
-	"github.com/pkg/errors"
 	"math/big"
 
+	"github.com/pkg/errors"
+
 	"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
@@ -213,13 +214,14 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
 	}
 
 	return &externalapi.DomainTransaction{
-		Version:      rpcTransaction.Version,
-		Inputs:       inputs,
-		Outputs:      outputs,
-		LockTime:     rpcTransaction.LockTime,
-		SubnetworkID: *subnetworkID,
-		Gas:          rpcTransaction.LockTime,
-		Payload:      payload,
+		Version:        rpcTransaction.Version,
+		Inputs:         inputs,
+		Outputs:        outputs,
+		LockTime:       rpcTransaction.LockTime,
+		SubnetworkID:   *subnetworkID,
+		Gas:            rpcTransaction.Gas,
+		MassCommitment: rpcTransaction.Mass,
+		Payload:        payload,
 	}, nil
 }
 
@@ -286,7 +288,8 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
 		Outputs:      outputs,
 		LockTime:     transaction.LockTime,
 		SubnetworkID: subnetworkID,
-		Gas:          transaction.LockTime,
+		Gas:          transaction.Gas,
+		Mass:         transaction.MassCommitment,
 		Payload:      payload,
 	}
 }
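The two struct-literal hunks above fix a copy-paste bug in which Gas was populated from LockTime in both conversion directions, and they also carry the new mass field through the conversion. A hypothetical regression test for this behaviour might look like the sketch below; it is not part of the commit, and it assumes the RPCTransaction is otherwise populated completely enough (subnetwork ID, inputs, outputs) for the conversion to succeed:

    func TestRPCTransactionGasAndMassRoundTrip(t *testing.T) {
        rpcTx := &appmessage.RPCTransaction{
            // ...remaining fields (SubnetworkID, Inputs, Outputs) omitted in this sketch.
            LockTime: 100,
            Gas:      7,
            Mass:     2000,
        }
        domainTx, err := appmessage.RPCTransactionToDomainTransaction(rpcTx)
        if err != nil {
            t.Fatalf("RPCTransactionToDomainTransaction: %v", err)
        }
        // Before the fix, Gas was silently copied from LockTime and Mass was dropped.
        if domainTx.Gas != rpcTx.Gas || domainTx.MassCommitment != rpcTx.Mass {
            t.Fatalf("gas/mass not preserved: gas=%d mass=%d", domainTx.Gas, domainTx.MassCommitment)
        }
    }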
@@ -156,6 +156,17 @@ const (
 	CmdVirtualDaaScoreChangedNotificationMessage
 	CmdGetBalancesByAddressesRequestMessage
 	CmdGetBalancesByAddressesResponseMessage
+	CmdNotifyNewBlockTemplateRequestMessage
+	CmdNotifyNewBlockTemplateResponseMessage
+	CmdNewBlockTemplateNotificationMessage
+	CmdGetMempoolEntriesByAddressesRequestMessage
+	CmdGetMempoolEntriesByAddressesResponseMessage
+	CmdGetCoinSupplyRequestMessage
+	CmdGetCoinSupplyResponseMessage
+	CmdGetFeeEstimateRequestMessage
+	CmdGetFeeEstimateResponseMessage
+	CmdSubmitTransactionReplacementRequestMessage
+	CmdSubmitTransactionReplacementResponseMessage
 )
 
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -286,6 +297,17 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdVirtualDaaScoreChangedNotificationMessage:   "VirtualDaaScoreChangedNotification",
 	CmdGetBalancesByAddressesRequestMessage:        "GetBalancesByAddressesRequest",
 	CmdGetBalancesByAddressesResponseMessage:       "GetBalancesByAddressesResponse",
+	CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
+	CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
+	CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
+	CmdGetMempoolEntriesByAddressesRequestMessage:  "GetMempoolEntriesByAddressesRequest",
+	CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
+	CmdGetCoinSupplyRequestMessage:                 "GetCoinSupplyRequest",
+	CmdGetCoinSupplyResponseMessage:                "GetCoinSupplyResponse",
+	CmdGetFeeEstimateRequestMessage:                "GetFeeEstimateRequest",
+	CmdGetFeeEstimateResponseMessage:               "GetFeeEstimateResponse",
+	CmdSubmitTransactionReplacementRequestMessage:  "SubmitTransactionReplacementRequest",
+	CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
 }
 
 // Message is an interface that describes a kaspa message. A type that
@@ -132,7 +132,7 @@ func TestConvertToPartial(t *testing.T) {
 	}
 }
 
-//blockOne is the first block in the mainnet block DAG.
+// blockOne is the first block in the mainnet block DAG.
 var blockOne = MsgBlock{
 	Header: MsgBlockHeader{
 		Version: 0,
@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
 
 // TestTxHash tests the ability to generate the hash of a transaction accurately.
 func TestTxHashAndID(t *testing.T) {
-	txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
-	txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
+	txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
+	txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
 	wantTxID1, err := transactionid.FromString(txID1Str)
 	if err != nil {
 		t.Fatalf("NewTxIDFromStr: %v", err)
@@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
 		spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
 	}
 
-	hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
+	hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
 	wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
 	if err != nil {
 		t.Errorf("NewTxIDFromStr: %v", err)
app/appmessage/rpc_fee_estimate.go (new file): 47 lines

@@ -0,0 +1,47 @@
package appmessage

// GetFeeEstimateRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetFeeEstimateRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
    return CmdGetFeeEstimateRequestMessage
}

// NewGetFeeEstimateRequestMessage returns a instance of the message
func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
    return &GetFeeEstimateRequestMessage{}
}

type RPCFeeRateBucket struct {
    Feerate          float64
    EstimatedSeconds float64
}

type RPCFeeEstimate struct {
    PriorityBucket RPCFeeRateBucket
    NormalBuckets  []RPCFeeRateBucket
    LowBuckets     []RPCFeeRateBucket
}

// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetFeeEstimateResponseMessage struct {
    baseMessage
    Estimate RPCFeeEstimate

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
    return CmdGetFeeEstimateResponseMessage
}

// NewGetFeeEstimateResponseMessage returns a instance of the message
func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
    return &GetFeeEstimateResponseMessage{}
}
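As a hedged usage sketch (not part of the commit), a caller that has already obtained a populated response from the RPC layer could consume the fee buckets roughly as follows; only the types and fields shown in the new file above are used, and the fmt import is assumed:

    func printFeeEstimate(resp *appmessage.GetFeeEstimateResponseMessage) {
        if resp.Error != nil {
            // Handle the RPC-level error.
            return
        }
        est := resp.Estimate
        fmt.Printf("priority feerate %f, ~%.0fs to confirmation\n",
            est.PriorityBucket.Feerate, est.PriorityBucket.EstimatedSeconds)
        for _, bucket := range est.NormalBuckets {
            // LowBuckets can be walked the same way.
            fmt.Printf("normal feerate %f, ~%.0fs to confirmation\n", bucket.Feerate, bucket.EstimatedSeconds)
        }
    }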
@@ -5,6 +5,7 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
 	baseMessage
 	PayAddress string
+	ExtraData  string
 }
 
 // Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }
 
 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
 	return &GetBlockTemplateRequestMessage{
 		PayAddress: payAddress,
+		ExtraData:  extraData,
 	}
 }
 
app/appmessage/rpc_get_coin_supply.go (new file): 40 lines

@@ -0,0 +1,40 @@
package appmessage

// GetCoinSupplyRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
    return CmdGetCoinSupplyRequestMessage
}

// NewGetCoinSupplyRequestMessage returns a instance of the message
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
    return &GetCoinSupplyRequestMessage{}
}

// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyResponseMessage struct {
    baseMessage
    MaxSompi         uint64
    CirculatingSompi uint64

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
    return CmdGetCoinSupplyResponseMessage
}

// NewGetCoinSupplyResponseMessage returns a instance of the message
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
    return &GetCoinSupplyResponseMessage{
        MaxSompi:         maxSompi,
        CirculatingSompi: circulatingSompi,
    }
}
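The supply figures in this message are denominated in sompi, Kaspa's smallest unit. A small, illustrative conversion for display purposes is sketched below; the sompiPerKaspa constant is a local assumption for this sketch (kaspad defines its own constant for the conversion), and the fmt import is assumed:

    func printCoinSupply(resp *appmessage.GetCoinSupplyResponseMessage) {
        const sompiPerKaspa = 100_000_000 // assumed conversion factor for this sketch
        fmt.Printf("circulating: %.8f KAS of max %.8f KAS\n",
            float64(resp.CirculatingSompi)/sompiPerKaspa,
            float64(resp.MaxSompi)/sompiPerKaspa)
    }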
@@ -23,6 +23,8 @@ type GetInfoResponseMessage struct {
 	P2PID         string
 	MempoolSize   uint64
 	ServerVersion string
+	IsUtxoIndexed bool
+	IsSynced      bool
 
 	Error *RPCError
 }
@@ -33,10 +35,12 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
 }
 
 // NewGetInfoResponseMessage returns a instance of the message
-func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
+func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
 	return &GetInfoResponseMessage{
 		P2PID:         p2pID,
 		MempoolSize:   mempoolSize,
 		ServerVersion: serverVersion,
+		IsUtxoIndexed: isUtxoIndexed,
+		IsSynced:      isSynced,
 	}
 }
 
@@ -4,6 +4,8 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntriesRequestMessage struct {
 	baseMessage
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }
 
 // Command returns the protocol command string for the message
@@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntriesRequestMessage returns a instance of the message
-func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
-	return &GetMempoolEntriesRequestMessage{}
+func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
+	return &GetMempoolEntriesRequestMessage{
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }
 
 // GetMempoolEntriesResponseMessage is an appmessage corresponding to
app/appmessage/rpc_get_mempool_entries_by_addresses.go (new file): 52 lines

@@ -0,0 +1,52 @@
package appmessage

// MempoolEntryByAddress represents MempoolEntries associated with some address
type MempoolEntryByAddress struct {
    Address   string
    Receiving []*MempoolEntry
    Sending   []*MempoolEntry
}

// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesRequestMessage struct {
    baseMessage
    Addresses             []string
    IncludeOrphanPool     bool
    FilterTransactionPool bool
}

// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
    return CmdGetMempoolEntriesByAddressesRequestMessage
}

// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
    return &GetMempoolEntriesByAddressesRequestMessage{
        Addresses:             addresses,
        IncludeOrphanPool:     includeOrphanPool,
        FilterTransactionPool: filterTransactionPool,
    }
}

// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesResponseMessage struct {
    baseMessage
    Entries []*MempoolEntryByAddress

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
    return CmdGetMempoolEntriesByAddressesResponseMessage
}

// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
    return &GetMempoolEntriesByAddressesResponseMessage{
        Entries: entries,
    }
}
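A hypothetical caller-side sketch for the new by-addresses messages: build a request for a couple of placeholder addresses, then walk a response's receiving and sending entries. The addresses are placeholders, the RPC transport is not shown, and the Fee field on MempoolEntry comes from a later hunk in this diff:

    func walkMempoolByAddresses(resp *appmessage.GetMempoolEntriesByAddressesResponseMessage) {
        req := appmessage.NewGetMempoolEntriesByAddressesRequestMessage(
            []string{"kaspa:qr...", "kaspa:qp..."}, // placeholder addresses
            true,  // includeOrphanPool
            false, // filterTransactionPool
        )
        _ = req // in real code the request is sent through the RPC client

        for _, byAddress := range resp.Entries {
            for _, entry := range byAddress.Receiving {
                fmt.Printf("%s receives in a mempool tx paying fee %d\n", byAddress.Address, entry.Fee)
            }
            for _, entry := range byAddress.Sending {
                fmt.Printf("%s spends in a mempool tx paying fee %d\n", byAddress.Address, entry.Fee)
            }
        }
    }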
@@ -4,7 +4,9 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntryRequestMessage struct {
 	baseMessage
-	TxID string
+	TxID                  string
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }
 
 // Command returns the protocol command string for the message
@@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntryRequestMessage returns a instance of the message
-func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
-	return &GetMempoolEntryRequestMessage{TxID: txID}
+func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
+	return &GetMempoolEntryRequestMessage{
+		TxID:                  txID,
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }
 
 // GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
 type MempoolEntry struct {
 	Fee         uint64
 	Transaction *RPCTransaction
+	IsOrphan    bool
 }
 
 // Command returns the protocol command string for the message
@@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntryResponseMessage returns a instance of the message
-func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
+func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
 	return &GetMempoolEntryResponseMessage{
 		Entry: &MempoolEntry{
 			Fee:         fee,
 			Transaction: transaction,
+			IsOrphan:    isOrphan,
 		},
 	}
 }
@@ -4,7 +4,8 @@ package appmessage
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
 	baseMessage
-	StartHash string
+	StartHash                     string
+	IncludeAcceptedTransactionIDs bool
 }
 
 // Command returns the protocol command string for the message
@@ -13,18 +14,29 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
 }
 
 // NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
-func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
+	startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+
 	return &GetVirtualSelectedParentChainFromBlockRequestMessage{
-		StartHash: startHash,
+		StartHash:                     startHash,
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
 	}
 }
 
+// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
+// VirtualSelectedParentChainChangedNotificationMessage appmessages
+type AcceptedTransactionIDs struct {
+	AcceptingBlockHash     string
+	AcceptedTransactionIDs []string
+}
+
 // GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 
 	Error *RPCError
 }
@@ -36,10 +48,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
 
 // NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
 func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
-	addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
+	addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
 
 	return &GetVirtualSelectedParentChainFromBlockResponseMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlockHashes,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
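An illustrative sketch, not taken from the repository, of how the extended request and the new AcceptedTransactionIDs field might be used together; the start hash is a placeholder, the response is assumed to have been returned by the node's RPC client, and fmt is assumed imported:

    func walkAcceptedTransactions(resp *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage) {
        req := appmessage.NewGetVirtualSelectedParentChainFromBlockRequestMessage(
            "<start-block-hash>", // placeholder
            true,                 // includeAcceptedTransactionIDs
        )
        _ = req // sent through the RPC client in real code

        for _, accepted := range resp.AcceptedTransactionIDs {
            fmt.Printf("chain block %s accepted %d transactions\n",
                accepted.AcceptingBlockHash, len(accepted.AcceptedTransactionIDs))
        }
    }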
app/appmessage/rpc_notify_new_block_template.go (new file): 50 lines

@@ -0,0 +1,50 @@
package appmessage

// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
    return CmdNotifyNewBlockTemplateRequestMessage
}

// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
    return &NotifyNewBlockTemplateRequestMessage{}
}

// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateResponseMessage struct {
    baseMessage
    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
    return CmdNotifyNewBlockTemplateResponseMessage
}

// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
    return &NotifyNewBlockTemplateResponseMessage{}
}

// NewBlockTemplateNotificationMessage is an appmessage corresponding to
// its respective RPC message
type NewBlockTemplateNotificationMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
    return CmdNewBlockTemplateNotificationMessage
}

// NewNewBlockTemplateNotificationMessage returns an instance of the message
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
    return &NewBlockTemplateNotificationMessage{}
}
@@ -4,6 +4,7 @@ package appmessage
 // its respective RPC message
 type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
 	baseMessage
+	IncludeAcceptedTransactionIDs bool
 }
 
 // Command returns the protocol command string for the message
@@ -11,9 +12,13 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
 	return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
 }
 
-// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
-func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
-	return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
+// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
+func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
+	includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
+
+	return &NotifyVirtualSelectedParentChainChangedRequestMessage{
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
+	}
 }
 
 // NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@@ -39,6 +44,7 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 }
 
 // Command returns the protocol command string for the message
@@ -48,10 +54,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
 
 // NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
 func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
-	addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
+	addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
 
 	return &VirtualSelectedParentChainChangedNotificationMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlocks,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
@@ -52,6 +52,7 @@ type RPCTransaction struct {
 	SubnetworkID string
 	Gas          uint64
 	Payload      string
+	Mass         uint64
 	VerboseData  *RPCTransactionVerboseData
 }
 
app/appmessage/rpc_submit_transaction_replacement.go (new file): 42 lines

@@ -0,0 +1,42 @@
package appmessage

// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
// its respective RPC message
type SubmitTransactionReplacementRequestMessage struct {
    baseMessage
    Transaction *RPCTransaction
}

// Command returns the protocol command string for the message
func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
    return CmdSubmitTransactionReplacementRequestMessage
}

// NewSubmitTransactionReplacementRequestMessage returns a instance of the message
func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
    return &SubmitTransactionReplacementRequestMessage{
        Transaction: transaction,
    }
}

// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
// its respective RPC message
type SubmitTransactionReplacementResponseMessage struct {
    baseMessage
    TransactionID       string
    ReplacedTransaction *RPCTransaction

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
    return CmdSubmitTransactionReplacementResponseMessage
}

// NewSubmitTransactionReplacementResponseMessage returns a instance of the message
func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
    return &SubmitTransactionReplacementResponseMessage{
        TransactionID: transactionID,
    }
}
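A hedged sketch of driving the new replacement messages from the caller side; rpcTx is assumed to be a fully populated *appmessage.RPCTransaction that conflicts with a transaction already in the mempool, the response is assumed to come back through the RPC client, and the transport itself is not shown:

    func submitReplacement(rpcTx *appmessage.RPCTransaction, resp *appmessage.SubmitTransactionReplacementResponseMessage) {
        req := appmessage.NewSubmitTransactionReplacementRequestMessage(rpcTx)
        _ = req // sent through the RPC client in real code

        if resp.Error != nil {
            // The node rejected the replacement.
            return
        }
        fmt.Printf("replacement accepted as %s\n", resp.TransactionID)
        if resp.ReplacedTransaction != nil {
            // ReplacedTransaction carries the mempool transaction that was evicted.
        }
    }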
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"sync/atomic"
 
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
 	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
 
 	"github.com/kaspanet/kaspad/app/protocol"
@@ -67,6 +69,7 @@ func (a *ComponentManager) Stop() {
 	}
 
 	a.protocolManager.Close()
+	close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
 
 	return
 }
@@ -118,7 +121,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
 	if err != nil {
 		return nil, err
 	}
-	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
+	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
 
 	return &ComponentManager{
 		cfg: cfg,
@@ -139,6 +142,7 @@ func setupRPC(
 	connectionManager *connmanager.ConnectionManager,
 	addressManager *addressmanager.AddressManager,
 	utxoIndex *utxoindex.UTXOIndex,
+	consensusEventsChan chan externalapi.ConsensusEvent,
 	shutDownChan chan<- struct{},
 ) *rpc.Manager {
 
@@ -150,10 +154,10 @@ func setupRPC(
 		connectionManager,
 		addressManager,
 		utxoIndex,
+		consensusEventsChan,
 		shutDownChan,
 	)
-	protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
-	protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
+	protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
 	protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
 
 	return rpcManager
@@ -16,53 +16,42 @@ import (
 // OnNewBlock updates the mempool after a new block arrival, and
 // relays newly unorphaned transactions and possibly rebroadcast
 // manually added transactions when not in IBD.
-func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
-	virtualChangeSet *externalapi.VirtualChangeSet) error {
+func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
 
 	hash := consensushashing.BlockHash(block)
-	log.Debugf("OnNewBlock start for block %s", hash)
-	defer log.Debugf("OnNewBlock end for block %s", hash)
+	log.Tracef("OnNewBlock start for block %s", hash)
+	defer log.Tracef("OnNewBlock end for block %s", hash)
 
-	unorphaningResults, err := f.UnorphanBlocks(block)
+	unorphanedBlocks, err := f.UnorphanBlocks(block)
 	if err != nil {
 		return err
 	}
 
-	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
+	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
 
 	newBlocks := []*externalapi.DomainBlock{block}
-	newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
-	for _, unorphaningResult := range unorphaningResults {
-		newBlocks = append(newBlocks, unorphaningResult.block)
-		newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
-	}
+	newBlocks = append(newBlocks, unorphanedBlocks...)
 
 	allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
-	for i, newBlock := range newBlocks {
+	for _, newBlock := range newBlocks {
 		log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
 		acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
 		if err != nil {
 			return err
 		}
 		allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
-
-		if f.onBlockAddedToDAGHandler != nil {
-			log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
-			virtualChangeSet = newVirtualChangeSets[i]
-			err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
-			if err != nil {
-				return err
-			}
-		}
 	}
 
 	return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
 }
 
-// OnVirtualChange calls the handler function whenever the virtual block changes.
-func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
-	if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
-		return f.onVirtualChangeHandler(virtualChangeSet)
+// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
+func (f *FlowContext) OnNewBlockTemplate() error {
+	// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
+	// state consistent without dependency on external event registration
+	f.Domain().MiningManager().ClearBlockTemplate()
+	if f.onNewBlockTemplateHandler != nil {
+		return f.onNewBlockTemplateHandler()
 	}
 
 	return nil
@@ -118,14 +107,18 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
 		return protocolerrors.Errorf(false, "cannot add header only block")
 	}
 
-	virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
+	err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
 		}
 		return err
 	}
-	err = f.OnNewBlock(block, virtualChangeSet)
+	err = f.OnNewBlockTemplate()
+	if err != nil {
+		return err
+	}
+	err = f.OnNewBlock(block)
 	if err != nil {
 		return err
 	}
@@ -150,7 +143,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
 		return false
 	}
 	f.ibdPeer = ibdPeer
-	log.Infof("IBD started")
+	log.Infof("IBD started with peer %s", ibdPeer)
 
 	return true
 }
@@ -18,12 +18,8 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
 )
 
-// OnBlockAddedToDAGHandler is a handler function that's triggered
-// when a block is added to the DAG
-type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
-
-// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
-type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
+// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
+type OnNewBlockTemplateHandler func() error
 
 // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
 // resets due to pruning point change via IBD.
@@ -44,8 +40,7 @@ type FlowContext struct {
 
 	timeStarted int64
 
-	onVirtualChangeHandler               OnVirtualChangeHandler
-	onBlockAddedToDAGHandler             OnBlockAddedToDAGHandler
+	onNewBlockTemplateHandler            OnNewBlockTemplateHandler
 	onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
 	onTransactionAddedToMempoolHandler   OnTransactionAddedToMempoolHandler
 
@@ -102,14 +97,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
 	return f.shutdownChan
 }
 
-// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
-func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
-	f.onVirtualChangeHandler = onVirtualChangeHandler
+// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
+func (f *FlowContext) IsNearlySynced() (bool, error) {
+	return f.Domain().Consensus().IsNearlySynced()
 }
 
-// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
-func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
-	f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
+// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
+func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
+	f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
 }
 
 // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
@@ -72,3 +72,10 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
 	}
 	return peers
 }
+
+// HasPeers returns whether there are currently active peers
+func (f *FlowContext) HasPeers() bool {
+	f.peersMutex.RLock()
+	defer f.peersMutex.RUnlock()
+	return len(f.peers) > 0
+}
@@ -15,12 +15,6 @@ import (
 // on: 2^orphanResolutionRange * PHANTOM K.
 const maxOrphans = 600
 
-// UnorphaningResult is the result of unorphaning a block
-type UnorphaningResult struct {
-	block            *externalapi.DomainBlock
-	virtualChangeSet *externalapi.VirtualChangeSet
-}
-
 // AddOrphan adds the block to the orphan set
 func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
 	f.orphansMutex.Lock()
@@ -57,7 +51,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
 }
 
 // UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
-func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
+func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
 	f.orphansMutex.Lock()
 	defer f.orphansMutex.Unlock()
 
@@ -66,7 +60,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
 	rootBlockHash := consensushashing.BlockHash(rootBlock)
 	processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
 
-	var unorphaningResults []*UnorphaningResult
+	var unorphanedBlocks []*externalapi.DomainBlock
 	for len(processQueue) > 0 {
 		var orphanHash externalapi.DomainHash
 		orphanHash, processQueue = processQueue[0], processQueue[1:]
@@ -90,21 +84,18 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
 			}
 		}
 		if canBeUnorphaned {
-			virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
+			unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
 			if err != nil {
 				return nil, err
 			}
 			if unorphaningSucceeded {
-				unorphaningResults = append(unorphaningResults, &UnorphaningResult{
-					block:            orphanBlock,
-					virtualChangeSet: virtualChangeSet,
-				})
+				unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
 				processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
 			}
 		}
 	}
 
-	return unorphaningResults, nil
+	return unorphanedBlocks, nil
 }
 
 // addChildOrphansToProcessQueue finds all child orphans of `blockHash`
@@ -143,24 +134,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
 	return childOrphans
 }
 
-func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
+func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
 	orphanBlock, ok := f.orphans[orphanHash]
 	if !ok {
-		return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
+		return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
 	}
 	delete(f.orphans, orphanHash)
 
-	virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
+	err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
-			return nil, false, nil
+			return false, nil
 		}
-		return nil, false, err
+		return false, err
 	}
 
 	log.Infof("Unorphaned block %s", orphanHash)
-	return virtualChangeSet, true, nil
+	return true, nil
 }
 
 // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@@ -1,47 +0,0 @@
package flowcontext

import "github.com/kaspanet/kaspad/util/mstime"

const (
	maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)

// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
	peers := f.Peers()
	if len(peers) == 0 {
		log.Debugf("The node is not connected, so ShouldMine returns false")
		return false, nil
	}

	if f.IsIBDRunning() {
		log.Debugf("IBD is running, so ShouldMine returns false")
		return false, nil
	}

	virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
	if err != nil {
		return false, err
	}

	if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
		return false, nil
	}

	virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
	if err != nil {
		return false, err
	}

	now := mstime.Now().UnixMilliseconds()
	if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
		log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
			virtualSelectedParentHeader.TimeInMilliseconds())
		return true, nil
	}

	log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
		virtualSelectedParentHeader.TimeInMilliseconds())
	return false, nil
}
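The removed ShouldMine flow gates mining on connectivity, IBD state, and how fresh the selected tip is. A standalone sketch of just the freshness rule, reusing the one-hour constant from the removed code (isTipRecent is a hypothetical helper name):

// isTipRecent reports whether the selected tip's timestamp is within the
// one-hour window that the removed ShouldMine used to allow mining.
func isTipRecent(tipTimeInMilliseconds int64) bool {
	const maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 hour
	now := mstime.Now().UnixMilliseconds()
	return now-tipTimeInMilliseconds < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds
}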
@@ -18,7 +18,7 @@ var (

	// minAcceptableProtocolVersion is the lowest protocol version that a
	// connected peer may support.
-	minAcceptableProtocolVersion = uint32(4)
+	minAcceptableProtocolVersion = uint32(5)

	maxAcceptableProtocolVersion = uint32(5)
)
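Raising the minimum to 5 means a peer advertising protocol version 4 is no longer accepted during version negotiation. A minimal sketch of the implied range check (isAcceptableProtocolVersion is a hypothetical helper, not the actual handshake code):

// isAcceptableProtocolVersion reports whether a peer's advertised version
// falls inside [minAcceptableProtocolVersion, maxAcceptableProtocolVersion].
func isAcceptableProtocolVersion(peerProtocolVersion uint32) bool {
	return peerProtocolVersion >= minAcceptableProtocolVersion &&
		peerProtocolVersion <= maxAcceptableProtocolVersion
}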
@@ -1,39 +0,0 @@
package addressexchange

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
	AddressManager() *addressmanager.AddressManager
}

// ReceiveAddresses asks a peer for more addresses if needed.
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
	peer *peerpkg.Peer) error {

	subnetworkID := peer.SubnetworkID()
	msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
	err := outgoingRoute.Enqueue(msgGetAddresses)
	if err != nil {
		return err
	}

	message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}

	msgAddresses := message.(*appmessage.MsgAddresses)
	if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
		return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
	}

	return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
}
@@ -1,52 +0,0 @@
package addressexchange

import (
	"math/rand"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
type SendAddressesContext interface {
	AddressManager() *addressmanager.AddressManager
}

// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	for {
		_, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}

		addresses := context.AddressManager().Addresses()
		msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))

		err = outgoingRoute.Enqueue(msgAddresses)
		if err != nil {
			return err
		}
	}
}

// shuffleAddresses randomizes the given addresses sent if there are more than the maximum allowed in one message.
func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
	addressCount := len(addresses)

	if addressCount < appmessage.MaxAddressesPerMsg {
		return addresses
	}

	shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
	copy(shuffleAddresses, addresses)

	rand.Shuffle(addressCount, func(i, j int) {
		shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
	})

	// Truncate it to the maximum size.
	shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
	return shuffleAddresses
}
@@ -1,16 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"testing"
)

func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
	if ibdBatchSize > router.DefaultMaxMessages {
		t.Fatalf("IBD batch size (%d) must be smaller than or equal to router.DefaultMaxMessages (%d)",
			ibdBatchSize, router.DefaultMaxMessages)
	}
}
@@ -1,33 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
	msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
	return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}

func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvRelayBlock:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgBlockLocator:
			return message.BlockLocatorHashes, nil
		default:
			return nil,
				protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
		}
	}
}
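The pair above implements a request/response in which unrelated block invs may arrive in between and are queued for later. A sketch of how a flow can use the pair to check whether any returned locator hash is already known locally, mirroring the orphan-resolution-range check elsewhere in this changeset (hasKnownLocatorHash is a hypothetical name):

func (flow *handleRelayInvsFlow) hasKnownLocatorHash(highHash *externalapi.DomainHash, limit uint32) (bool, error) {
	// Ask the peer for a locator of at most `limit` hashes below highHash.
	err := flow.sendGetBlockLocator(highHash, limit)
	if err != nil {
		return false, err
	}
	blockLocatorHashes, err := flow.receiveBlockLocator()
	if err != nil {
		return false, err
	}
	// Any locator hash we already have with a body means the peer's chain
	// is close to ours.
	for _, blockLocatorHash := range blockLocatorHashes {
		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
		if err != nil {
			return false, err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			return true, nil
		}
	}
	return false, nil
}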
@@ -1,86 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
type HandleIBDBlockLocatorContext interface {
	Domain() domain.Domain
}

// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
// the highest known block that's in the selected parent chain of `targetHash` to the
// requesting peer.
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)

		targetHash := ibdBlockLocatorMessage.TargetHash
		log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)

		blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
		if err != nil {
			return err
		}
		if !blockInfo.Exists {
			return protocolerrors.Errorf(true, "received IBDBlockLocator "+
				"with an unknown targetHash %s", targetHash)
		}

		foundHighestHashInTheSelectedParentChainOfTargetHash := false
		for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
			if err != nil {
				return err
			}

			// The IBD block locator is checking only existing blocks with bodies.
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				continue
			}

			isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
				context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
			if err != nil {
				return err
			}
			if !isBlockLocatorHashInSelectedParentChainOfHighHash {
				continue
			}

			foundHighestHashInTheSelectedParentChainOfTargetHash = true
			log.Debugf("Found a known hash %s amongst peer %s's "+
				"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)

			ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
			if err != nil {
				return err
			}
			break
		}

		if !foundHighestHashInTheSelectedParentChainOfTargetHash {
			log.Warnf("no hash was found in the blockLocator "+
				"that was in the selected parent chain of targetHash %s", targetHash)

			ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
			if err != nil {
				return err
			}
		}
	}
}
@@ -1,54 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
type HandleIBDBlockRequestsContext interface {
	Domain() domain.Domain
}

// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
		log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
		for i, hash := range msgRequestIBDBlocks.Hashes {
			// Fetch the block from the database.
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
			if err != nil {
				return err
			}
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				return protocolerrors.Errorf(true, "block %s not found", hash)
			}
			block, err := context.Domain().Consensus().GetBlock(hash)
			if err != nil {
				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
			}

			// TODO (Partial nodes): Convert block to partial block if needed

			blockMessage := appmessage.DomainBlockToMsgBlock(block)
			ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
			err = outgoingRoute.Enqueue(ibdBlockMessage)
			if err != nil {
				return err
			}
			log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
		}
	}
}
@@ -1,145 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"sync/atomic"
)

// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
	Domain() domain.Domain
	Config() *config.Config
}

var isBusy uint32

// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		err := func() error {
			_, err := incomingRoute.Dequeue()
			if err != nil {
				return err
			}

			if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
				return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
			}
			defer atomic.StoreUint32(&isBusy, 0)

			log.Debugf("Got request for pruning point and its anticone from %s", peer)

			pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
			if err != nil {
				return err
			}

			msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
			for i, header := range pruningPointHeaders {
				msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
			if err != nil {
				return err
			}

			pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
			if err != nil {
				return err
			}

			windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
			daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
			daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
			trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)

			ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
			ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
			trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
			for _, blockHash := range pointAndItsAnticone {
				blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
				if err != nil {
					return err
				}

				trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
				for i, daaBlockHash := range blockDAAWindowHashes {
					index, exists := daaWindowHashesToIndex[*daaBlockHash]
					if !exists {
						trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
						if err != nil {
							return err
						}
						daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
						index = len(daaWindowBlocks) - 1
						daaWindowHashesToIndex[*daaBlockHash] = index
					}

					trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
				}

				ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
				if err != nil {
					return err
				}

				trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
				for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
					index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
					if !exists {
						data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
						if err != nil {
							return err
						}
						ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
							Hash: ghostdagDataBlockHash,
							GHOSTDAGData: data,
						})
						index = len(ghostdagData) - 1
						ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
					}

					trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
				}
			}

			err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
			if err != nil {
				return err
			}

			for _, blockHash := range pointAndItsAnticone {
				block, err := context.Domain().Consensus().GetBlock(blockHash)
				if err != nil {
					return err
				}

				err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
				if err != nil {
					return err
				}
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
			if err != nil {
				return err
			}

			log.Debugf("Sent pruning point and its anticone to %s", peer)
			return nil
		}()
		if err != nil {
			return err
		}
	}
}
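The handler above avoids resending shared DAA-window and GHOSTDAG entries by giving each unique hash a single slot in a slice and referring to it by index afterwards. A small sketch of that hash-to-index deduplication pattern in isolation (indexFor and its parameters are illustrative, not kaspad APIs):

// indexFor returns the slice index for hash, loading and appending the entry
// only the first time the hash is seen.
func indexFor(hash externalapi.DomainHash, hashToIndex map[externalapi.DomainHash]int,
	entries *[]*externalapi.TrustedDataDataDAAHeader,
	load func() (*externalapi.TrustedDataDataDAAHeader, error)) (int, error) {

	if index, exists := hashToIndex[hash]; exists {
		return index, nil
	}
	entry, err := load()
	if err != nil {
		return 0, err
	}
	*entries = append(*entries, entry)
	index := len(*entries) - 1
	hashToIndex[hash] = index
	return index, nil
}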
@@ -1,40 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
	Domain() domain.Domain
}

// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		_, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}

		log.Debugf("Got request for pruning point proof from %s", peer)

		pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
		if err != nil {
			return err
		}
		pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
		err = outgoingRoute.Enqueue(pruningPointProofMessage)
		if err != nil {
			return err
		}

		log.Debugf("Sent pruning point proof to %s", peer)
	}
}
@@ -1,53 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
	Domain() domain.Domain
}

// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
		log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
		for _, hash := range getRelayBlocksMessage.Hashes {
			// Fetch the block from the database.
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
			if err != nil {
				return err
			}
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				return protocolerrors.Errorf(true, "block %s not found", hash)
			}
			block, err := context.Domain().Consensus().GetBlock(hash)
			if err != nil {
				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
			}

			// TODO (Partial nodes): Convert block to partial block if needed

			err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
			if err != nil {
				return err
			}
			log.Debugf("Relayed block with hash %s", hash)
		}
	}
}
@ -1,416 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// orphanResolutionRange is the maximum amount of blockLocator hashes
|
||||
// to search for known blocks. See isBlockInOrphanResolutionRange for
|
||||
// further details
|
||||
var orphanResolutionRange uint32 = 5
|
||||
|
||||
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
|
||||
type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
||||
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
|
||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||
IsIBDRunning() bool
|
||||
IsRecoverableError(err error) bool
|
||||
}
|
||||
|
||||
type handleRelayInvsFlow struct {
|
||||
RelayInvsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
invsQueue []*appmessage.MsgInvRelayBlock
|
||||
}
|
||||
|
||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||
// are missing, adds them to the DAG and propagates them to the rest of the network.
|
||||
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||
peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &handleRelayInvsFlow{
|
||||
RelayInvsContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
||||
}
|
||||
err := flow.start()
|
||||
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||
close(peer.IBDRequestChannel())
|
||||
return err
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) start() error {
|
||||
for {
|
||||
log.Debugf("Waiting for inv")
|
||||
inv, err := flow.readInv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Got relay inv for block %s", inv.Hash)
|
||||
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
if blockInfo.BlockStatus == externalapi.StatusInvalid {
|
||||
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
|
||||
inv.Hash)
|
||||
}
|
||||
log.Debugf("Block %s already exists. continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.IsOrphan(inv.Hash) {
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
|
||||
err := flow.AddOrphanRootsToQueue(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Block relay is disabled during IBD
|
||||
if flow.IsIBDRunning() {
|
||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Requesting block %s", inv.Hash)
|
||||
block, exists, err := flow.requestBlock(inv.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
err = flow.banIfBlockIsHeaderOnly(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
|
||||
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Processing block %s", inv.Hash)
|
||||
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
missingParents, virtualChangeSet, err := flow.processBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Infof("Ignoring duplicate block %s", inv.Hash)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(missingParents) > 0 {
|
||||
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
|
||||
err := flow.processOrphan(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
oldVirtualParents := hashset.New()
|
||||
for _, parent := range oldVirtualInfo.ParentHashes {
|
||||
oldVirtualParents.Add(parent)
|
||||
}
|
||||
|
||||
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, parent := range newVirtualInfo.ParentHashes {
|
||||
if oldVirtualParents.Contains(parent) {
|
||||
continue
|
||||
}
|
||||
|
||||
block, err := flow.Domain().Consensus().GetBlock(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("Relaying block %s", blockHash)
|
||||
err = flow.relayBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||
if len(block.Transactions) == 0 {
|
||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||
consensushashing.BlockHash(block))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
||||
if len(flow.invsQueue) > 0 {
|
||||
var inv *appmessage.MsgInvRelayBlock
|
||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
msg, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
"expecting an inv message", msg.Command())
|
||||
}
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
|
||||
if exists {
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
|
||||
// clean from any pending blocks.
|
||||
defer flow.SharedRequestedBlocks().Remove(requestHash)
|
||||
|
||||
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
|
||||
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
msgBlock, err := flow.readMsgBlock()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgBlock)
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
if !blockHash.Equal(requestHash) {
|
||||
return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
|
||||
}
|
||||
|
||||
return block, false, nil
|
||||
}
|
||||
|
||||
// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
|
||||
//
|
||||
// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
|
||||
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
|
||||
for {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgInvRelayBlock:
|
||||
flow.invsQueue = append(flow.invsQueue, message)
|
||||
case *appmessage.MsgBlock:
|
||||
return message, nil
|
||||
default:
|
||||
return nil, errors.Errorf("unexpected message %s", message.Command())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
}
|
||||
|
||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||
if errors.As(err, missingParentsError) {
|
||||
return missingParentsError.MissingParentHashes, nil, nil
|
||||
}
|
||||
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
|
||||
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
}
|
||||
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
}
|
||||
return nil, virtualChangeSet, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
|
||||
// Return if the block has been orphaned from elsewhere already
|
||||
if flow.IsOrphan(blockHash) {
|
||||
log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the block to the orphan set if it's within orphan resolution range
|
||||
isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBlockInOrphanResolutionRange {
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
|
||||
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", blockHash)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Block %s is within orphan resolution range. "+
|
||||
"Adding it to the orphan set", blockHash)
|
||||
flow.AddOrphan(block)
|
||||
log.Debugf("Requesting block %s missing ancestors", blockHash)
|
||||
return flow.AddOrphanRootsToQueue(blockHash)
|
||||
}
|
||||
|
||||
// Start IBD unless we already are in IBD
|
||||
log.Debugf("Block %s is out of orphan resolution range. "+
|
||||
"Attempting to start IBD against it.", blockHash)
|
||||
|
||||
// Send the block to IBD flow via the IBDRequestChannel.
|
||||
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
|
||||
select {
|
||||
case flow.peer.IBDRequestChannel() <- block:
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
|
||||
parents := block.Header.DirectParents()
|
||||
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
|
||||
}
|
||||
|
||||
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
|
||||
// retrieved via the unorphaning mechanism or via IBD. This method sends a
|
||||
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
|
||||
// In the response, if we know none of the hashes, we should retrieve the given
|
||||
// blockHash via IBD. Otherwise, via unorphaning.
|
||||
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
|
||||
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
blockLocatorHashes, err := flow.receiveBlockLocator()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, blockLocatorHash := range blockLocatorHashes {
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
|
||||
orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !orphanExists {
|
||||
log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
|
||||
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
||||
}
|
||||
|
||||
if len(orphanRoots) == 0 {
|
||||
// In some rare cases we get here when there are no orphan roots already
|
||||
return nil
|
||||
}
|
||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
||||
|
||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
||||
for i, root := range orphanRoots {
|
||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
||||
}
|
||||
|
||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
||||
return nil
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
|
||||
type RequestBlockLocatorContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestBlockLocatorFlow struct {
|
||||
RequestBlockLocatorContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandleRequestBlockLocator handles getBlockLocator messages
|
||||
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
|
||||
outgoingRoute *router.Route) error {
|
||||
|
||||
flow := &handleRequestBlockLocatorFlow{
|
||||
RequestBlockLocatorContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) start() error {
|
||||
for {
|
||||
highHash, limit, err := flow.receiveGetBlockLocator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)
|
||||
|
||||
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
|
||||
if err != nil || len(locator) == 0 {
|
||||
if err != nil {
|
||||
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
|
||||
}
|
||||
return protocolerrors.Errorf(true, "couldn't build a block "+
|
||||
"locator between the pruning point and %s", highHash)
|
||||
}
|
||||
|
||||
err = flow.sendBlockLocator(locator)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
|
||||
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
|
||||
|
||||
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
|
||||
}
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
|
||||
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
|
||||
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,107 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
|
||||
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
|
||||
const ibdBatchSize = 100
|
||||
|
||||
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||
type RequestHeadersContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestHeadersFlow struct {
|
||||
RequestHeadersContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peer.Peer
|
||||
}
|
||||
|
||||
// HandleRequestHeaders handles RequestHeaders messages
|
||||
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
|
||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||
|
||||
flow := &handleRequestHeadersFlow{
|
||||
RequestHeadersContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestHeadersFlow) start() error {
|
||||
for {
|
||||
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||
|
||||
for !lowHash.Equal(highHash) {
|
||||
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
||||
|
||||
// GetHashesBetween is a relatively heavy operation so we limit it
|
||||
// in order to avoid locking the consensus for too long
|
||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
||||
const maxBlocks = 1 << 10
|
||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)
|
||||
|
||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||
for i, blockHash := range blockHashes {
|
||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
|
||||
}
|
||||
|
||||
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
|
||||
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
|
||||
}
|
||||
|
||||
// The next lowHash is the last element in blockHashes
|
||||
lowHash = blockHashes[len(blockHashes)-1]
|
||||
}
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, err error) {
|
||||
|
||||
message, err := incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
|
||||
}
|
@ -1,140 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
|
||||
type HandleRequestPruningPointUTXOSetContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestPruningPointUTXOSetFlow struct {
|
||||
HandleRequestPruningPointUTXOSetContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
|
||||
// the pruning point UTXO set and block body.
|
||||
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
|
||||
outgoingRoute *router.Route) error {
|
||||
|
||||
flow := &handleRequestPruningPointUTXOSetFlow{
|
||||
HandleRequestPruningPointUTXOSetContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
|
||||
for {
|
||||
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
|
||||
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
|
||||
defer onEnd()
|
||||
|
||||
log.Debugf("Got request for pruning point UTXO set")
|
||||
|
||||
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
|
||||
*appmessage.MsgRequestPruningPointUTXOSet, error) {
|
||||
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
|
||||
if !ok {
|
||||
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
||||
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
|
||||
}
|
||||
return msgRequestPruningPointUTXOSet, nil
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
|
||||
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
||||
|
||||
// Send the UTXO set in `step`-sized chunks
|
||||
const step = 1000
|
||||
var fromOutpoint *externalapi.DomainOutpoint
|
||||
chunksSent := 0
|
||||
for {
|
||||
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
|
||||
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
|
||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Retrieved %d UTXOs for pruning block %s",
|
||||
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||
|
||||
outpointAndUTXOEntryPairs :=
|
||||
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
finished := len(pruningPointUTXOs) < step
|
||||
if finished && chunksSent%ibdBatchSize != 0 {
|
||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
||||
msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||
|
||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
||||
}
|
||||
|
||||
if len(pruningPointUTXOs) > 0 {
|
||||
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
|
||||
}
|
||||
chunksSent++
|
||||
|
||||
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
|
||||
if chunksSent%ibdBatchSize == 0 {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
|
||||
if !ok {
|
||||
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
||||
return protocolerrors.Errorf(false, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
|
||||
}
|
||||
|
||||
if finished {
|
||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
||||
msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||
|
||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,645 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
"github.com/pkg/errors"
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
// IBDContext is the interface for the context needed for the HandleIBD flow.
|
||||
type IBDContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
UnsetIBDRunning()
|
||||
IsRecoverableError(err error) bool
|
||||
}
|
||||
|
||||
type handleIBDFlow struct {
|
||||
IBDContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// HandleIBD handles IBD
|
||||
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||
peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &handleIBDFlow{
|
||||
IBDContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) start() error {
|
||||
for {
|
||||
// Wait for IBD requests triggered by other flows
|
||||
block, ok := <-flow.peer.IBDRequestChannel()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
err := flow.runIBDIfNotRunning(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
	highHash := consensushashing.BlockHash(block)

	// Temp code to avoid IBD from lagging nodes publishing their side-chain. This patch
	// is applied only to p2p v4 since the implemented IBD negotiation has quadratic complexity in this worst-case.
	// See IBD logic of p2p v5 for further details.
	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
	if err == nil {
		virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
		if err == nil {
			// We first check that DAA score of the relay block is at distance of more than DAA window size.
			// This indicates a side-chain which is not in the future of any block in the current virtual DAA window.
			if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
				// We then find the 'unit' of current virtual difficulty. We check if the relay block is at least
				// at distance of 180 such units. This signals another condition for a pow-weak side-chain.
				virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
				var virtualSub, difficultyMul big.Int
				if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
					Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
					log.Criticalf("Avoiding IBD triggered by relay %s with %d DAA score diff and lower blue work (%d, %d)",
						highHash,
						virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
						virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
					return nil
				}
			}
		}
	}

	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
	if !wasIBDNotRunning {
		log.Debugf("IBD is already running")
		return nil
	}

	isFinishedSuccessfully := false
	defer func() {
		flow.UnsetIBDRunning()
		flow.logIBDFinished(isFinishedSuccessfully)
	}()

	log.Infof("IBD started with peer %s and highHash %s", flow.peer, highHash)
	log.Infof("Syncing blocks up to %s", highHash)
	log.Infof("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
	highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
	if err != nil {
		return err
	}
	log.Infof("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
	if err != nil {
		return err
	}

	if !shouldSync {
		return nil
	}

	if shouldDownloadHeadersProof {
		log.Infof("Starting IBD with headers proof")
		err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
		if err != nil {
			return err
		}
	} else {
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", highHash)
				return nil
			}
		}

		err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
		if err != nil {
			return err
		}
	}

	err = flow.syncMissingBlockBodies(highHash)
	if err != nil {
		return err
	}

	log.Debugf("Finished syncing blocks up to %s", highHash)
	isFinishedSuccessfully = true
	return nil
}
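
Worked through, the side-chain filter near the top of runIBDIfNotRunning reduces to a single big-integer comparison: IBD is skipped when the virtual's blue work exceeds the relayed block's blue work by more than 180 units of the current virtual difficulty. Below is a minimal stand-alone sketch of that comparison; the helper name and the sample values are assumptions, only the factor 180 and the use of math/big come from the code above.

package main

import (
	"fmt"
	"math/big"
)

// shouldSkipIBDForSidechain is a hypothetical stand-alone version of the check above:
// it returns true when the blue-work gap between the virtual selected parent and the
// relayed block exceeds sidechainUnits times the current virtual difficulty (one "unit").
func shouldSkipIBDForSidechain(virtualBlueWork, relayBlueWork, virtualDifficulty *big.Int, sidechainUnits int64) bool {
	threshold := new(big.Int).Mul(virtualDifficulty, big.NewInt(sidechainUnits))
	gap := new(big.Int).Sub(virtualBlueWork, relayBlueWork)
	return threshold.Cmp(gap) < 0 // the gap is larger than sidechainUnits difficulty units
}

func main() {
	virtualBlueWork := big.NewInt(1_000_000)
	relayBlueWork := big.NewInt(100_000)
	virtualDifficulty := big.NewInt(1_000)
	fmt.Println(shouldSkipIBDForSidechain(virtualBlueWork, relayBlueWork, virtualDifficulty, 180)) // true
}
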
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
successString := "successfully"
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
}
|
||||
|
||||
// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and this node have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
	targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
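
The loop above is essentially a zoom-in search over the selected chain: each round the peer reports the highest locator entry it knows, and the next locator is rebuilt between that entry and the entry just above it, until the two are adjacent. A much-simplified sketch of the same narrowing idea, using a plain slice and a membership callback instead of real block locators (all names here are illustrative):

package main

import "fmt"

// findHighestShared is a simplified illustration of the narrowing loop above.
// chain is our selected chain from low to high, and peerKnows reports whether
// the peer knows a given block. Each round we probe a point and then zoom into
// the gap just above the highest known point until the gap closes.
func findHighestShared(chain []string, peerKnows func(string) bool) string {
	low, high := 0, len(chain)-1
	for {
		if high-low <= 1 {
			if peerKnows(chain[high]) {
				return chain[high]
			}
			return chain[low]
		}
		mid := (low + high) / 2
		if peerKnows(chain[mid]) {
			low = mid
		} else {
			high = mid
		}
	}
}

func main() {
	chain := []string{"a", "b", "c", "d", "e", "f", "g"}
	known := map[string]bool{"a": true, "b": true, "c": true, "d": true}
	fmt.Println(findHighestShared(chain, func(h string) bool { return known[h] })) // d
}
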
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
	log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
	blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
	if err != nil {
		if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
			return nil, err
		}
		log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
			"restarting with full block locator")
		blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
		if err != nil {
			return nil, err
		}
	}

	return blockLocator, nil
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and this node have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
||||
highestHash := message.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, true, nil
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
||||
return nil, false, nil
|
||||
default:
|
||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
|
||||
log.Infof("Downloading headers from %s", flow.peer)
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
|
||||
|
||||
// Keep a short queue of BlockHeadersMessages so that there's
|
||||
// never a moment when the node is not validating and inserting
|
||||
// headers
|
||||
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
|
||||
errChan := make(chan error)
|
||||
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
|
||||
for {
|
||||
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if doneIBD {
|
||||
close(blockHeadersMessageChan)
|
||||
return
|
||||
}
|
||||
|
||||
blockHeadersMessageChan <- blockHeadersMessage
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
for {
|
||||
select {
|
||||
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
// If the highHash has not been received, the peer is misbehaving
|
||||
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"highHash block %s from peer %s during block download", highHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, header := range ibdBlocksMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
|
||||
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
|
||||
case err := <-errChan:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
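
The short buffered channel used above is a standard bounded-pipeline pattern: one goroutine keeps requesting and receiving batches from the network while the main loop validates them, so neither side sits idle. A generic, self-contained sketch of that pattern (the batch type and function names are placeholders, not the kaspad API):

package main

import "fmt"

// processAll receives batches from fetch on a small buffered channel so that
// fetching the next batch overlaps with processing the current one.
func processAll(fetch func() ([]int, bool, error), process func([]int) error) error {
	batches := make(chan []int, 2) // short queue, as in the flow above
	errs := make(chan error, 1)

	go func() {
		for {
			batch, done, err := fetch()
			if err != nil {
				errs <- err
				return
			}
			if done {
				close(batches)
				return
			}
			batches <- batch
		}
	}()

	for {
		select {
		case batch, ok := <-batches:
			if !ok {
				return nil // producer finished cleanly
			}
			if err := process(batch); err != nil {
				return err
			}
		case err := <-errs:
			return err
		}
	}
}

func main() {
	i := 0
	fetch := func() ([]int, bool, error) {
		if i == 3 {
			return nil, true, nil
		}
		i++
		return []int{i}, false, nil
	}
	err := processAll(fetch, func(b []int) error { fmt.Println(b); return nil })
	fmt.Println("err:", err)
}
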
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.BlockHeadersMessage:
|
||||
return message, false, nil
|
||||
case *appmessage.MsgDoneHeaders:
|
||||
return nil, true, nil
|
||||
default:
|
||||
return nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s",
|
||||
appmessage.CmdBlockHeaders,
|
||||
appmessage.CmdDoneHeaders,
|
||||
message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
|
||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
||||
block := &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: nil,
|
||||
}
|
||||
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
blockInfo, err := consensus.GetBlockInfo(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists {
|
||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||
return nil
|
||||
}
|
||||
_, err = consensus.ValidateAndInsertBlock(block, false)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||
}
|
||||
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
|
||||
} else {
|
||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
||||
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
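
processHeader sorts insertion failures into three buckets: non-rule errors abort IBD, duplicate-block errors are silently skipped, and any other rule error is treated as peer misbehavior. A compact sketch of that triage using plain sentinel errors (the error values below are stand-ins for kaspad's ruleerrors):

package main

import (
	"errors"
	"fmt"
)

var (
	errRule      = errors.New("rule error") // stand-in for ruleerrors.RuleError
	errDuplicate = fmt.Errorf("%w: duplicate block", errRule)
)

// classifyInsertError mirrors the triage in processHeader: internal failures
// propagate as-is, duplicates are silently skipped, and other rule violations
// are reported as protocol errors attributable to the peer.
func classifyInsertError(err error) string {
	switch {
	case err == nil:
		return "inserted"
	case !errors.Is(err, errRule):
		return "internal error - abort IBD"
	case errors.Is(err, errDuplicate):
		return "duplicate - skip"
	default:
		return "invalid header - ban peer"
	}
}

func main() {
	fmt.Println(classifyInsertError(nil))
	fmt.Println(classifyInsertError(errDuplicate))
	fmt.Println(classifyInsertError(errors.New("disk failure")))
	fmt.Println(classifyInsertError(fmt.Errorf("%w: bad pow", errRule)))
}
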
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
|
||||
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
|
||||
|
||||
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
|
||||
|
||||
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
|
||||
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
|
||||
"tip is smaller than the current selected tip")
|
||||
}
|
||||
|
||||
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
|
||||
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
|
||||
return protocolerrors.Errorf(false, "difference between the timestamps of "+
|
||||
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
|
||||
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
|
||||
defer onEnd()
|
||||
|
||||
receivedChunkCount := 0
|
||||
receivedUTXOCount := 0
|
||||
for {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgPruningPointUTXOSetChunk:
|
||||
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
|
||||
domainOutpointAndUTXOEntryPairs :=
|
||||
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
|
||||
|
||||
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
receivedChunkCount++
|
||||
if receivedChunkCount%ibdBatchSize == 0 {
|
||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
receivedChunkCount, receivedUTXOCount)
|
||||
|
||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
||||
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
case *appmessage.MsgDonePruningPointUTXOSetChunks:
|
||||
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
|
||||
return true, nil
|
||||
|
||||
case *appmessage.MsgUnexpectedPruningPoint:
|
||||
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
|
||||
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
|
||||
return false, nil
|
||||
|
||||
default:
|
||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
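
The chunked UTXO-set download above is credit-based flow control: the sender pushes a fixed window of chunks and then waits until the receiver explicitly requests the next window. A generic sketch of the receiver side (the chunk payload and the batch size of 4 are placeholders; the real flow uses ibdBatchSize):

package main

import "fmt"

const batchSize = 4 // placeholder for ibdBatchSize

// receiveChunks consumes chunks and acknowledges every batchSize of them by
// calling requestNext, mirroring MsgRequestNextPruningPointUTXOSetChunk above.
func receiveChunks(next func() ([]string, bool), requestNext func()) int {
	received := 0
	chunks := 0
	for {
		chunk, done := next()
		if done {
			return received
		}
		received += len(chunk)
		chunks++
		if chunks%batchSize == 0 {
			requestNext() // let the sender push the next window of chunks
		}
	}
}

func main() {
	i := 0
	next := func() ([]string, bool) {
		if i == 10 {
			return nil, true
		}
		i++
		return []string{"utxo"}, false
	}
	total := receiveChunks(next, func() { fmt.Println("requesting next window") })
	fmt.Println("total UTXOs:", total)
}
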
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
|
||||
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(hashes) == 0 {
|
||||
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
|
||||
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
|
||||
// In these cases - GetMissingBlockBodyHashes would return an empty array.
|
||||
log.Debugf("No missing block body hashes found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
|
||||
highestProcessedDAAScore := lowBlockHeader.DAAScore()
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
if offset+ibdBatchSize < len(hashes) {
|
||||
hashesToRequest = hashes[offset : offset+ibdBatchSize]
|
||||
} else {
|
||||
hashesToRequest = hashes[offset:]
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, expectedHash := range hashesToRequest {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
if !expectedHash.Equal(blockHash) {
|
||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
||||
}
|
||||
|
||||
err = flow.banIfBlockIsHeaderOnly(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
|
||||
continue
|
||||
}
|
||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
||||
}
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
highestProcessedDAAScore = block.Header.DAAScore()
|
||||
}
|
||||
|
||||
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
|
||||
}
|
||||
|
||||
return flow.resolveVirtual(highestProcessedDAAScore)
|
||||
}
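
syncMissingBlockBodies requests bodies in windows of ibdBatchSize hashes and then reads exactly that many responses, keeping requests and responses aligned per batch. The windowing itself is the usual slice-batching idiom, sketched below with strings standing in for block hashes:

package main

import "fmt"

// batches splits items into consecutive windows of at most size elements,
// matching the offset loop in syncMissingBlockBodies.
func batches(items []string, size int) [][]string {
	var out [][]string
	for offset := 0; offset < len(items); offset += size {
		end := offset + size
		if end > len(items) {
			end = len(items)
		}
		out = append(out, items[offset:end])
	}
	return out
}

func main() {
	hashes := []string{"h1", "h2", "h3", "h4", "h5"}
	for _, b := range batches(hashes, 2) {
		fmt.Println(b) // [h1 h2] [h3 h4] [h5]
	}
}
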
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||
if len(block.Transactions) == 0 {
|
||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||
consensushashing.BlockHash(block))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
	virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
	if err != nil {
		return err
	}

	for i := 0; ; i++ {
		if i%10 == 0 {
			virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
			if err != nil {
				return err
			}
			var percents int
			// These are uint64 values, so compare directly rather than subtracting to avoid wraparound.
			if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
				percents = 100
			} else {
				percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
			}
			log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
		}
		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
		if err != nil {
			return err
		}

		err = flow.OnVirtualChange(virtualChangeSet)
		if err != nil {
			return err
		}

		if isCompletelyResolved {
			log.Infof("Resolved virtual")
			return nil
		}
	}
}
@@ -1,45 +0,0 @@
package blockrelay

type ibdProgressReporter struct {
	lowDAAScore                 uint64
	highDAAScore                uint64
	objectName                  string
	totalDAAScoreDifference     uint64
	lastReportedProgressPercent int
	processed                   int
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
	if highDAAScore <= lowDAAScore {
		// Avoid a zero or negative diff
		highDAAScore = lowDAAScore + 1
	}
	return &ibdProgressReporter{
		lowDAAScore:                 lowDAAScore,
		highDAAScore:                highDAAScore,
		objectName:                  objectName,
		totalDAAScoreDifference:     highDAAScore - lowDAAScore,
		lastReportedProgressPercent: 0,
		processed:                   0,
	}
}

func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
	ipr.processed += processedDelta

	// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
	if highestProcessedDAAScore > ipr.highDAAScore {
		ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
		ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
	}
	relativeDAAScore := uint64(0)
	if highestProcessedDAAScore > ipr.lowDAAScore {
		// Avoid a negative diff
		relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
	}
	progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
	if progressPercent > ipr.lastReportedProgressPercent {
		log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
		ipr.lastReportedProgressPercent = progressPercent
	}
}
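
For context, the reporter above is driven per received batch, with the batch size and the highest DAA score seen so far. A hedged usage sketch from inside the same package (the DAA scores below are invented and this helper is not part of the original file):

package blockrelay

// exampleProgressReporting illustrates how the reporter is driven.
func exampleProgressReporting() {
	progress := newIBDProgressReporter(1_000, 5_000, "block headers")
	batches := [][]uint64{{1_500, 2_000}, {3_000, 4_200}, {5_000}}
	for _, batch := range batches {
		// Report how many objects this batch contained and the highest DAA score seen so far.
		progress.reportProgress(len(batch), batch[len(batch)-1])
	}
}
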
@ -1,394 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
err := flow.Domain().InitStagingConsensus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
if !flow.IsRecoverableError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
|
||||
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
|
||||
if deleteStagingConsensusErr != nil {
|
||||
return deleteStagingConsensusErr
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
|
||||
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
|
||||
err = flow.Domain().CommitStagingConsensus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.OnPruningPointUTXOSetOverride()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
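
ibdWithHeadersProof runs the proof-based sync against a staging consensus so that a failed or malicious sync never touches the active state: initialize staging, attempt the download, delete staging on a recoverable failure, and commit it only on success. A stripped-down sketch of that lifecycle against a hypothetical Stager interface (not the kaspad domain API):

package main

import (
	"errors"
	"fmt"
)

// Stager is a hypothetical stand-in for the staging-consensus lifecycle used above.
type Stager interface {
	Init() error
	Commit() error
	Delete() error
}

// withStaging mirrors the control flow of ibdWithHeadersProof: work runs against a
// staging area which is committed only if work succeeds, and deleted when the
// failure is recoverable (i.e. the node can keep operating on its current state).
func withStaging(s Stager, work func() error, recoverable func(error) bool) error {
	if err := s.Init(); err != nil {
		return err
	}
	if err := work(); err != nil {
		if recoverable(err) {
			if deleteErr := s.Delete(); deleteErr != nil {
				return deleteErr
			}
		}
		return err
	}
	return s.Commit()
}

type memStager struct{ state string }

func (m *memStager) Init() error   { m.state = "staging"; return nil }
func (m *memStager) Commit() error { m.state = "committed"; return nil }
func (m *memStager) Delete() error { m.state = "deleted"; return nil }

func main() {
	s := &memStager{}
	err := withStaging(s, func() error { return errors.New("peer disconnected") },
		func(error) bool { return true })
	fmt.Println(s.state, err) // deleted peer disconnected

	s2 := &memStager{}
	fmt.Println(withStaging(s2, func() error { return nil }, func(error) bool { return true }), s2.state) // <nil> committed
}
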
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
|
||||
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
|
||||
|
||||
if !highestSharedBlockFound {
|
||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
|
||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||
log.Infof("Downloading the pruning point proof from %s", flow.peer)
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
|
||||
}
|
||||
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
|
||||
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
|
||||
if err != nil {
|
||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Remove this condition once there's more proper way to check finality violation
|
||||
// in the headers proof.
|
||||
if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
|
||||
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Headers downloaded from peer %s", flow.peer)
|
||||
|
||||
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !highHashInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
|
||||
}
|
||||
|
||||
err = flow.validatePruningPointFutureHeaderTimestamps()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Syncing the current pruning point UTXO set")
|
||||
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !syncedPruningPointUTXOSetSuccessfully {
|
||||
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Finished syncing the current pruning point UTXO set")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
|
||||
log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.validateAndInsertPruningPoints(proofPruningPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
|
||||
}
|
||||
|
||||
pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
|
||||
}
|
||||
|
||||
if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
|
||||
return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
|
||||
}
|
||||
|
||||
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if done {
|
||||
break
|
||||
}
|
||||
|
||||
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) processBlockWithTrustedData(
|
||||
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
|
||||
|
||||
blockWithTrustedData := &externalapi.BlockWithTrustedData{
|
||||
Block: appmessage.MsgBlockToDomainBlock(block.Block),
|
||||
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
|
||||
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
|
||||
}
|
||||
|
||||
for _, index := range block.DAAWindowIndices {
|
||||
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
|
||||
}
|
||||
|
||||
for _, index := range block.GHOSTDAGDataIndices {
|
||||
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
||||
}
|
||||
|
||||
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
||||
return err
|
||||
}
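
Each MsgBlockWithTrustedDataV4 carries only indices into the shared DAAWindow and GHOSTDAGData lists of the single MsgTrustedData message, so entries shared by many anticone blocks are sent once. A small generic sketch of that index-based reconstruction (string payloads stand in for the real DAA-window and GHOSTDAG entries):

package main

import "fmt"

// sharedData mimics MsgTrustedData: the full de-duplicated entry lists sent once.
type sharedData struct {
	daaWindow    []string
	ghostdagData []string
}

// blockRefs mimics MsgBlockWithTrustedDataV4: per-block indices into sharedData.
type blockRefs struct {
	daaWindowIndices    []uint64
	ghostdagDataIndices []uint64
}

// resolve rebuilds the per-block trusted data from the shared pools, like
// processBlockWithTrustedData does above.
func resolve(shared sharedData, refs blockRefs) (daaWindow, ghostdag []string) {
	for _, i := range refs.daaWindowIndices {
		daaWindow = append(daaWindow, shared.daaWindow[i])
	}
	for _, i := range refs.ghostdagDataIndices {
		ghostdag = append(ghostdag, shared.ghostdagData[i])
	}
	return daaWindow, ghostdag
}

func main() {
	shared := sharedData{
		daaWindow:    []string{"w0", "w1", "w2"},
		ghostdagData: []string{"g0", "g1"},
	}
	daa, gd := resolve(shared, blockRefs{daaWindowIndices: []uint64{0, 2}, ghostdagDataIndices: []uint64{1}})
	fmt.Println(daa, gd) // [w0 w2] [g1]
}
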
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
switch downCastedMessage := message.(type) {
|
||||
case *appmessage.MsgBlockWithTrustedDataV4:
|
||||
return downCastedMessage, false, nil
|
||||
case *appmessage.MsgDoneBlocksWithTrustedData:
|
||||
return nil, true, nil
|
||||
default:
|
||||
return nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s",
|
||||
(&appmessage.MsgBlockWithTrustedData{}).Command(),
|
||||
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
|
||||
downCastedMessage.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
|
||||
if !ok {
|
||||
return nil,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
|
||||
}
|
||||
|
||||
return msgPruningPoints, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
|
||||
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentPruningPoint.Equal(proofPruningPoint) {
|
||||
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
|
||||
}
|
||||
|
||||
pruningPoints, err := flow.receivePruningPoints()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
|
||||
for i, header := range pruningPoints.Headers {
|
||||
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
|
||||
}
|
||||
|
||||
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if arePruningPointsViolatingFinality {
|
||||
// TODO: Find a better way to deal with finality conflicts.
|
||||
return protocolerrors.Errorf(false, "pruning points are violating finality")
|
||||
}
|
||||
|
||||
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
|
||||
if !lastPruningPoint.Equal(proofPruningPoint) {
|
||||
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
|
||||
"point in the list")
|
||||
}
|
||||
|
||||
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
|
||||
pruningPoint *externalapi.DomainHash) (bool, error) {
|
||||
|
||||
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
|
||||
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !isValid {
|
||||
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
|
||||
}
|
||||
|
||||
log.Info("Fetching the pruning point UTXO set")
|
||||
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
||||
if err != nil {
|
||||
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !isSuccessful {
|
||||
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("Fetched the new pruning point UTXO set")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
|
||||
defer func() {
|
||||
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
|
||||
}
|
||||
}()
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !receivedAll {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
|
||||
if err != nil {
|
||||
// TODO: Find a better way to deal with finality conflicts.
|
||||
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
|
||||
return false, nil
|
||||
}
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
@@ -1,9 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
|
@ -1,35 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
|
||||
type SendVirtualSelectedParentInvContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
}
|
||||
|
||||
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
|
||||
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
|
||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||
|
||||
virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
|
||||
log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
|
||||
|
||||
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
|
||||
return outgoingRoute.Enqueue(virtualSelectedParentInv)
|
||||
}
|
@ -1,42 +0,0 @@
|
||||
package ping
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
|
||||
type ReceivePingsContext interface {
|
||||
}
|
||||
|
||||
type receivePingsFlow struct {
|
||||
ReceivePingsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// ReceivePings handles all ping messages coming through incomingRoute.
|
||||
// This function assumes that incomingRoute will only return MsgPing.
|
||||
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
|
||||
flow := &receivePingsFlow{
|
||||
ReceivePingsContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *receivePingsFlow) start() error {
|
||||
for {
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pingMessage := message.(*appmessage.MsgPing)
|
||||
|
||||
pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
|
||||
err = flow.outgoingRoute.Enqueue(pongMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
@ -1,77 +0,0 @@
|
||||
package ping
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/random"
|
||||
)
|
||||
|
||||
// SendPingsContext is the interface for the context needed for the SendPings flow.
|
||||
type SendPingsContext interface {
|
||||
ShutdownChan() <-chan struct{}
|
||||
}
|
||||
|
||||
type sendPingsFlow struct {
|
||||
SendPingsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// SendPings starts sending MsgPings every pingInterval seconds to the
|
||||
// given peer.
|
||||
// This function assumes that incomingRoute will only return MsgPong.
|
||||
func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||
flow := &sendPingsFlow{
|
||||
SendPingsContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *sendPingsFlow) start() error {
|
||||
const pingInterval = 2 * time.Minute
|
||||
ticker := time.NewTicker(pingInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-flow.ShutdownChan():
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
nonce, err := random.Uint64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
flow.peer.SetPingPending(nonce)
|
||||
|
||||
pingMessage := appmessage.NewMsgPing(nonce)
|
||||
err = flow.outgoingRoute.Enqueue(pingMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
if errors.Is(err, router.ErrTimeout) {
|
||||
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
pongMessage := message.(*appmessage.MsgPong)
|
||||
if pongMessage.Nonce != pingMessage.Nonce {
|
||||
return protocolerrors.New(true, "nonce mismatch between ping and pong")
|
||||
}
|
||||
flow.peer.SetPingIdle()
|
||||
}
|
||||
}
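
The send-pings loop pairs every ping with a random nonce and treats a timeout or a mismatched pong nonce as a peer failure. A compact sketch of that round trip against a hypothetical channel-based transport (not kaspad's router):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pingOnce sends a nonce and waits for the matching pong, mirroring the loop above:
// a timeout or a mismatched nonce is an error attributable to the peer.
func pingOnce(send func(uint64), pongs <-chan uint64, nonce uint64, timeout time.Duration) error {
	send(nonce)
	select {
	case got := <-pongs:
		if got != nonce {
			return errors.New("nonce mismatch between ping and pong")
		}
		return nil
	case <-time.After(timeout):
		return errors.New("ping timeout")
	}
}

func main() {
	pongs := make(chan uint64, 1)
	send := func(nonce uint64) { pongs <- nonce } // a well-behaved peer echoes the nonce
	fmt.Println(pingOnce(send, pongs, 42, time.Second)) // <nil>
}
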
@ -1,194 +0,0 @@
|
||||
package v4
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
type protocolManager interface {
|
||||
RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
|
||||
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
|
||||
RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
|
||||
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
|
||||
RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
|
||||
messageTypes []appmessage.MessageCommand, isStopping *uint32,
|
||||
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
|
||||
Context() *flowcontext.FlowContext
|
||||
}
|
||||
|
||||
// Register is used in order to register all the protocol flows to the given router.
|
||||
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
|
||||
flows = registerAddressFlows(m, router, isStopping, errChan)
|
||||
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
|
||||
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
|
||||
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
|
||||
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
|
||||
|
||||
return flows
|
||||
}
|
||||
|
||||
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*common.Flow{
|
||||
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*common.Flow{
|
||||
m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
|
||||
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
|
||||
}),
|
||||
|
||||
m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
|
||||
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
|
||||
},
|
||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
|
||||
outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
|
||||
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
|
||||
appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
|
||||
appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
|
||||
appmessage.CmdPruningPointProof,
|
||||
appmessage.CmdTrustedData,
|
||||
},
|
||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleIBD(m.Context(), incomingRoute,
|
||||
outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestBlockLocator", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestHeaders", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleIBDBlockRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
|
||||
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleIBDBlockLocator", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointProofRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*common.Flow{
|
||||
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*common.Flow{
|
||||
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
m.RegisterFlow("HandleRequestTransactions", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*common.Flow{
|
||||
m.RegisterFlow("HandleRejects", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
package rejects
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleRejectsContext is the interface for the context needed for the HandleRejects flow.
|
||||
type HandleRejectsContext interface {
|
||||
}
|
||||
|
||||
type handleRejectsFlow struct {
|
||||
HandleRejectsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandleRejects handles all reject messages coming through incomingRoute.
|
||||
// This function assumes that incomingRoute will only return MsgReject.
|
||||
func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
|
||||
flow := &handleRejectsFlow{
|
||||
HandleRejectsContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRejectsFlow) start() error {
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rejectMessage := message.(*appmessage.MsgReject)
|
||||
|
||||
return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
|
||||
}
|
@@ -1,24 +0,0 @@
package testing

import (
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/pkg/errors"
)

func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
	pErr := protocolerrors.ProtocolError{}
	if errors.As(err, &pErr) != isProtocolError {
		t.Fatalf("Unexpected error %+v", err)
	}

	if pErr.ShouldBan != shouldBan {
		t.Fatalf("Expected shouldBan %t but got %t", shouldBan, pErr.ShouldBan)
	}

	if !strings.Contains(err.Error(), contains) {
		t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err)
	}
}
|
@ -1,51 +0,0 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain/consensus"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
type fakeReceiveAddressesContext struct{}
|
||||
|
||||
func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestReceiveAddressesErrors(t *testing.T) {
|
||||
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
|
||||
incomingRoute := router.NewRoute("incoming")
|
||||
outgoingRoute := router.NewRoute("outgoing")
|
||||
peer := peerpkg.New(nil)
|
||||
errChan := make(chan error)
|
||||
go func() {
|
||||
errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer)
|
||||
}()
|
||||
|
||||
_, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
|
||||
// Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress,
|
||||
addressmanager.GetAddressesMax+1)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
checkFlowError(t, err, true, true, "address count exceeded")
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("timed out after %s", time.Second)
|
||||
}
|
||||
})
|
||||
}
|
@@ -1,4 +0,0 @@
package testing

// Because of a bug in Go, coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333
// So this is a dummy non-test go file in the package.
@@ -1,209 +0,0 @@
package transactionrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// TransactionsRelayContext is the interface for the context needed for the
// HandleRelayedTransactions and HandleRequestedTransactions flows.
type TransactionsRelayContext interface {
	NetAdapter() *netadapter.NetAdapter
	Domain() domain.Domain
	SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
	OnTransactionAddedToMempool()
	EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
	IsIBDRunning() bool
}

type handleRelayedTransactionsFlow struct {
	TransactionsRelayContext
	incomingRoute, outgoingRoute *router.Route
	invsQueue                    []*appmessage.MsgInvTransaction
}

// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they
// are missing, adds them to the mempool and propagates them to the rest of the network.
func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &handleRelayedTransactionsFlow{
		TransactionsRelayContext: context,
		incomingRoute:            incomingRoute,
		outgoingRoute:            outgoingRoute,
		invsQueue:                make([]*appmessage.MsgInvTransaction, 0),
	}
	return flow.start()
}

func (flow *handleRelayedTransactionsFlow) start() error {
	for {
		inv, err := flow.readInv()
		if err != nil {
			return err
		}

		if flow.IsIBDRunning() {
			continue
		}

		requestedIDs, err := flow.requestInvTransactions(inv)
		if err != nil {
			return err
		}

		err = flow.receiveTransactions(requestedIDs)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
	inv *appmessage.MsgInvTransaction) (requestedIDs []*externalapi.DomainTransactionID, err error) {

	idsToRequest := make([]*externalapi.DomainTransactionID, 0, len(inv.TxIDs))
	for _, txID := range inv.TxIDs {
		if flow.isKnownTransaction(txID) {
			continue
		}
		exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
		if exists {
			continue
		}
		idsToRequest = append(idsToRequest, txID)
	}

	if len(idsToRequest) == 0 {
		return idsToRequest, nil
	}

	msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
	err = flow.outgoingRoute.Enqueue(msgGetTransactions)
	if err != nil {
		flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
		return nil, err
	}
	return idsToRequest, nil
}

func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
	// Ask the transaction memory pool if the transaction is known
	// to it in any form (main pool or orphan).
	if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
		return true
	}

	return false
}

func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) {
	if len(flow.invsQueue) > 0 {
		var inv *appmessage.MsgInvTransaction
		inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
		return inv, nil
	}

	msg, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}

	inv, ok := msg.(*appmessage.MsgInvTransaction)
	if !ok {
		return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay flow while "+
			"expecting an inv message", msg.Command())
	}
	return inv, nil
}

func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error {
	return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs)
}

// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute,
// returning only one of the message types at a time.
//
// and populates invsQueue with any inv messages that meanwhile arrive.
func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
	msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) {

	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvTransaction:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgTx:
			return message, nil, nil
		case *appmessage.MsgTransactionNotFound:
			return nil, message, nil
		default:
			return nil, nil, errors.Errorf("unexpected message %s", message.Command())
		}
	}
}

func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
	// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
	// clean from any pending transactions.
	defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
	for _, expectedID := range requestedTransactions {
		msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
		if err != nil {
			return err
		}
		if msgTxNotFound != nil {
			if !msgTxNotFound.ID.Equal(expectedID) {
				return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
					expectedID, msgTxNotFound.ID)
			}

			continue
		}
		tx := appmessage.MsgTxToDomainTransaction(msgTx)
		txID := consensushashing.TransactionID(tx)
		if !txID.Equal(expectedID) {
			return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
				expectedID, txID)
		}

		acceptedTransactions, err :=
			flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true)
		if err != nil {
			ruleErr := &mempool.RuleError{}
			if !errors.As(err, ruleErr) {
				return errors.Wrapf(err, "failed to process transaction %s", txID)
			}

			shouldBan := false
			if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
				if txRuleErr.RejectCode == mempool.RejectInvalid {
					shouldBan = true
				}
			}

			if !shouldBan {
				continue
			}

			return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
		}
		err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions))
		if err != nil {
			return err
		}
		flow.OnTransactionAddedToMempool()
	}
	return nil
}
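The request/cleanup pattern in requestInvTransactions and receiveTransactions above relies on SharedRequestedTransactions behaving as a concurrency-safe de-duplication set: AddIfNotExists reports whether an ID was already requested, and RemoveMany releases IDs when the request completes or fails. The sketch below illustrates only those semantics and is an assumption-based stand-in; the real implementation lives in the flowcontext package and may differ in detail.

// sharedRequestedTransactions is an illustrative stand-in for
// flowcontext.SharedRequestedTransactions: a mutex-protected set of
// transaction IDs that are currently being requested from peers.
type sharedRequestedTransactions struct {
	mutex        sync.Mutex
	transactions map[externalapi.DomainTransactionID]struct{}
}

func newSharedRequestedTransactions() *sharedRequestedTransactions {
	return &sharedRequestedTransactions{transactions: make(map[externalapi.DomainTransactionID]struct{})}
}

// addIfNotExists returns true when the ID was already present, mirroring
// the semantics the relay flow expects from AddIfNotExists.
func (s *sharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if _, ok := s.transactions[*txID]; ok {
		return true
	}
	s.transactions[*txID] = struct{}{}
	return false
}

// removeMany forgets IDs whose requests completed or failed, so they can be
// requested again on a later inv.
func (s *sharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	for _, txID := range txIDs {
		delete(s.transactions, *txID)
	}
}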
@@ -1,196 +0,0 @@
package transactionrelay_test

import (
	"errors"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type mocTransactionsRelayContext struct {
	netAdapter                  *netadapter.NetAdapter
	domain                      domain.Domain
	sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
}

func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
	return m.netAdapter
}

func (m *mocTransactionsRelayContext) Domain() domain.Domain {
	return m.domain
}

func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
	return m.sharedRequestedTransactions
}

func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
	return nil
}

func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}

func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
	return false
}

// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {

		var log = logger.RegisterSubSystem("PROT")
		var spawn = panics.GoroutineWrapperFunc(log)
		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		defer incomingRoute.Close()
		peerIncomingRoute := router.NewRoute("outgoing")
		defer peerIncomingRoute.Close()

		txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
		txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
		invMessage := appmessage.NewMsgInvTransaction(txIDs)
		err = incomingRoute.Enqueue(invMessage)
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		// The goroutine represents the peer's actions.
		spawn("peerResponseToTheTransactionsRequest", func() {
			msg, err := peerIncomingRoute.Dequeue()
			if err != nil {
				t.Fatalf("Dequeue: %v", err)
			}
			inv := msg.(*appmessage.MsgRequestTransactions)

			if len(txIDs) != len(inv.IDs) {
				t.Fatalf("TestHandleRelayedTransactions: expected %d transaction IDs, but got %d", len(txIDs), len(inv.IDs))
			}

			for i, id := range inv.IDs {
				if txIDs[i].String() != id.String() {
					t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
				}
				err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i]))
				if err != nil {
					t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
				}
			}
			// Insert an unexpected message type to stop the infinite loop.
			err = incomingRoute.Enqueue(&appmessage.MsgAddresses{})
			if err != nil {
				t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
			}
		})

		err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute)
		// Since we inserted an unexpected message type to stop the infinite loop,
		// we expect the error to originate from this specific message, and we also
		// expect it to count as a protocol error.
		if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) {
			t.Fatalf("Expected a protocol error")
		} else {
			if !protocolErr.ShouldBan {
				t.Fatalf("Expected shouldBan true, but got false.")
			}
			if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") {
				t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+
					"in the block relay flow, but got: %v", protocolErr.Cause)
			}
		}
	})
}

// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when
// trying to dequeue a message from a closed route.
func TestOnClosedIncomingRoute(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {

		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		outgoingRoute := router.NewRoute("outgoing")
		defer outgoingRoute.Close()

		txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txIDs := []*externalapi.DomainTransactionID{txID}

		err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs})
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		incomingRoute.Close()
		err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute)
		if err == nil || !errors.Is(err, router.ErrRouteClosed) {
			t.Fatalf("Unexpected error: expected: %v, got: %v", router.ErrRouteClosed, err)
		}
	})
}
@@ -1,59 +0,0 @@
package transactionrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type handleRequestedTransactionsFlow struct {
	TransactionsRelayContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested
// transactions if those are in the mempool.
// Missing transactions would be ignored
func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
	flow := &handleRequestedTransactionsFlow{
		TransactionsRelayContext: context,
		incomingRoute:            incomingRoute,
		outgoingRoute:            outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRequestedTransactionsFlow) start() error {
	for {
		msgRequestTransactions, err := flow.readRequestTransactions()
		if err != nil {
			return err
		}

		for _, transactionID := range msgRequestTransactions.IDs {
			tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)

			if !ok {
				msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
				err := flow.outgoingRoute.Enqueue(msgTransactionNotFound)
				if err != nil {
					return err
				}
				continue
			}

			err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
			if err != nil {
				return err
			}
		}
	}
}

func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) {
	msg, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}

	return msg.(*appmessage.MsgRequestTransactions), nil
}
@@ -1,91 +0,0 @@
package transactionrelay_test

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
	"testing"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util/panics"
	"github.com/pkg/errors"
)

// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions
// when the requested transactions aren't found in the mempool.
func TestHandleRequestedTransactionsNotFound(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		var log = logger.RegisterSubSystem("PROT")
		var spawn = panics.GoroutineWrapperFunc(log)
		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound")
		if err != nil {
			t.Fatalf("Error setting up test consensus: %+v", err)
		}
		defer teardown(false)

		sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
		adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
		if err != nil {
			t.Fatalf("Failed to create a NetAdapter: %v", err)
		}
		domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
		if err != nil {
			t.Fatalf("Failed to set up a domain instance: %v", err)
		}
		context := &mocTransactionsRelayContext{
			netAdapter:                  adapter,
			domain:                      domainInstance,
			sharedRequestedTransactions: sharedRequestedTransactions,
		}
		incomingRoute := router.NewRoute("incoming")
		outgoingRoute := router.NewRoute("outgoing")
		defer outgoingRoute.Close()

		txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
		txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
		txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
		msg := appmessage.NewMsgRequestTransactions(txIDs)
		err = incomingRoute.Enqueue(msg)
		if err != nil {
			t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
		}
		// The goroutine represents the peer's actions.
		spawn("peerResponseToTheTransactionsMessages", func() {
			for i, id := range txIDs {
				msg, err := outgoingRoute.Dequeue()
				if err != nil {
					t.Fatalf("Dequeue: %s", err)
				}
				outMsg := msg.(*appmessage.MsgTransactionNotFound)
				if txIDs[i].String() != outMsg.ID.String() {
					t.Fatalf("TestHandleRequestedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
				}
			}
			// Close the incomingRoute to stop the infinite loop.
			incomingRoute.Close()
		})

		err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute)
		// Make sure the error is due to the closed route.
		if err == nil || !errors.Is(err, router.ErrRouteClosed) {
			t.Fatalf("Unexpected error: expected: %v, got: %v", router.ErrRouteClosed, err)
		}
	})
}
@@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex

		switch message := message.(type) {
		case *appmessage.MsgInvRelayBlock:
-			flow.invsQueue = append(flow.invsQueue, message)
+			flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
		case *appmessage.MsgBlockLocator:
			return message.BlockLocatorHashes, nil
		default:
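The hunk above reflects the change, visible throughout the block relay hunks that follow, from queueing raw *appmessage.MsgInvRelayBlock values to queueing a small invRelayBlock value that also records whether the inv was queued as an orphan root. The sketch below restates only that idea for orientation; the authoritative definition and the orphan-root prepending logic appear further down in this same diff.

// invRelayBlock (as introduced by this change set) pairs the relayed hash
// with a flag saying whether it was queued as the root of an orphan chain;
// orphan roots later skip the merge-depth relevance heuristic.
type invRelayBlock struct {
	Hash         *externalapi.DomainHash
	IsOrphanRoot bool
}

// Orphan roots are prepended so they are processed before newer invs,
// mirroring AddOrphanRootsToQueue later in this diff.
func prependOrphanRoots(queue []invRelayBlock, roots []*externalapi.DomainHash) []invRelayBlock {
	invs := make([]invRelayBlock, len(roots))
	for i, root := range roots {
		invs[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
	}
	return append(invs, queue...)
}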
@ -5,7 +5,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockInfo.Exists {
|
||||
if !blockInfo.HasHeader() {
|
||||
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
||||
"with an unknown targetHash %s", targetHash)
|
||||
}
|
||||
@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
||||
}
|
||||
|
||||
// The IBD block locator is checking only existing blocks with bodies.
|
||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
if !blockInfo.HasBody() {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@ -28,18 +27,15 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
|
||||
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
||||
for i, hash := range msgRequestIBDBlocks.Hashes {
|
||||
// Fetch the block from the database.
|
||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
||||
}
|
||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
||||
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||
}
|
||||
|
||||
if !found {
|
||||
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
|
||||
}
|
||||
|
||||
// TODO (Partial nodes): Convert block to partial block if needed
|
||||
|
||||
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
||||
|
@ -119,11 +119,15 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
|
||||
}
|
||||
|
||||
for i, blockHash := range pointAndItsAnticone {
|
||||
block, err := context.Domain().Consensus().GetBlock(blockHash)
|
||||
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
|
||||
}
|
||||
|
||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@ -29,18 +28,15 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
|
||||
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
||||
for _, hash := range getRelayBlocksMessage.Hashes {
|
||||
// Fetch the block from the database.
|
||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
||||
}
|
||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
||||
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||
}
|
||||
|
||||
if !found {
|
||||
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
|
||||
}
|
||||
|
||||
// TODO (Partial nodes): Convert block to partial block if needed
|
||||
|
||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
@ -25,8 +26,8 @@ var orphanResolutionRange uint32 = 5
|
||||
type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlock(block *externalapi.DomainBlock) error
|
||||
OnNewBlockTemplate() error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
@ -35,13 +36,19 @@ type RelayInvsContext interface {
|
||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||
IsIBDRunning() bool
|
||||
IsRecoverableError(err error) bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type invRelayBlock struct {
|
||||
Hash *externalapi.DomainHash
|
||||
IsOrphanRoot bool
|
||||
}
|
||||
|
||||
type handleRelayInvsFlow struct {
|
||||
RelayInvsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
invsQueue []*appmessage.MsgInvRelayBlock
|
||||
invsQueue []invRelayBlock
|
||||
}
|
||||
|
||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||
@ -54,7 +61,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
||||
invsQueue: make([]invRelayBlock, 0),
|
||||
}
|
||||
err := flow.start()
|
||||
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||
@ -105,10 +112,16 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Block relay is disabled during IBD
|
||||
// Block relay is disabled if the node is already during IBD AND considered out of sync
|
||||
if flow.IsIBDRunning() {
|
||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
||||
continue
|
||||
isNearlySynced, err := flow.IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isNearlySynced {
|
||||
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Requesting block %s", inv.Hash)
|
||||
@ -131,12 +144,36 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
|
||||
// that means the process started by a proper and relevant relay block
|
||||
if !inv.IsOrphanRoot {
|
||||
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
|
||||
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
|
||||
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Since `BlueWork` respects topology, this condition means that the relay
|
||||
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
|
||||
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
|
||||
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
|
||||
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
|
||||
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Processing block %s", inv.Hash)
|
||||
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
missingParents, virtualChangeSet, err := flow.processBlock(block)
|
||||
missingParents, err := flow.processBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||
@ -168,15 +205,20 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
virtualHasNewParents := false
|
||||
for _, parent := range newVirtualInfo.ParentHashes {
|
||||
if oldVirtualParents.Contains(parent) {
|
||||
continue
|
||||
}
|
||||
|
||||
block, err := flow.Domain().Consensus().GetBlock(parent)
|
||||
virtualHasNewParents = true
|
||||
block, found, err := flow.Domain().Consensus().GetBlock(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
|
||||
}
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("Relaying block %s", blockHash)
|
||||
err = flow.relayBlock(block)
|
||||
@ -185,8 +227,16 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
}
|
||||
}
|
||||
|
||||
if virtualHasNewParents {
|
||||
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
|
||||
err = flow.OnNewBlockTemplate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -202,24 +252,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
||||
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
|
||||
if len(flow.invsQueue) > 0 {
|
||||
var inv *appmessage.MsgInvRelayBlock
|
||||
var inv invRelayBlock
|
||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
msg, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return invRelayBlock{}, err
|
||||
}
|
||||
|
||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
"expecting an inv message", msg.Command())
|
||||
}
|
||||
return inv, nil
|
||||
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||
@ -264,7 +314,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgInvRelayBlock:
|
||||
flow.invsQueue = append(flow.invsQueue, message)
|
||||
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||
case *appmessage.MsgBlock:
|
||||
return message, nil
|
||||
default:
|
||||
@ -273,25 +323,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
}
|
||||
|
||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||
if errors.As(err, missingParentsError) {
|
||||
return missingParentsError.MissingParentHashes, nil, nil
|
||||
return missingParentsError.MissingParentHashes, nil
|
||||
}
|
||||
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
|
||||
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
}
|
||||
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
}
|
||||
return nil, virtualChangeSet, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||
@ -405,10 +455,10 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
|
||||
}
|
||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
||||
|
||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
||||
invMessages := make([]invRelayBlock, len(orphanRoots))
|
||||
for i, root := range orphanRoots {
|
||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
||||
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
|
||||
}
|
||||
|
||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
||||
|
@ -47,9 +47,9 @@ func (flow *handleRequestAnticoneFlow) start() error {
|
||||
|
||||
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
|
||||
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
|
||||
// we relay blocks only if they enter virtual's mergeset. We add 2 for a small margin error.
|
||||
// we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
|
||||
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
|
||||
flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
|
||||
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
|
||||
if err != nil {
|
||||
return protocolerrors.Wrap(true, err, "Failed querying anticone")
|
||||
}
|
||||
|
@ -44,9 +44,27 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||
|
||||
isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
|
||||
consensus := flow.Domain().Consensus()
|
||||
|
||||
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !lowHashInfo.HasHeader() {
|
||||
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
|
||||
}
|
||||
|
||||
highHashInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashInfo.HasHeader() {
|
||||
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
|
||||
}
|
||||
|
||||
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -62,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
// in order to avoid locking the consensus for too long
|
||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
||||
const maxBlocks = 1 << 10
|
||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -70,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
|
||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||
for i, blockHash := range blockHashes {
|
||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||
blockHeader, err := consensus.GetBlockHeader(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
@ -20,8 +21,8 @@ import (
|
||||
type IBDContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlock(block *externalapi.DomainBlock) error
|
||||
OnNewBlockTemplate() error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
@ -70,99 +71,22 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
}
|
||||
|
||||
isFinishedSuccessfully := false
|
||||
var err error
|
||||
defer func() {
|
||||
flow.UnsetIBDRunning()
|
||||
flow.logIBDFinished(isFinishedSuccessfully)
|
||||
flow.logIBDFinished(isFinishedSuccessfully, err)
|
||||
}()
|
||||
|
||||
relayBlockHash := consensushashing.BlockHash(block)
|
||||
|
||||
log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||
log.Debugf("Syncing blocks up to %s", relayBlockHash)
|
||||
log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||
log.Infof("Syncing blocks up to %s", relayBlockHash)
|
||||
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||
|
||||
/*
|
||||
Algorithm:
|
||||
Request full selected chain block locator from syncer
|
||||
Find the highest block which we know
|
||||
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||
*/
|
||||
|
||||
// Empty hashes indicate that the full chain is queried
|
||||
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil)
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||
chainNegotiationRestartCounter := 0
|
||||
for {
|
||||
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||
for _, syncerChainHash := range locatorHashes {
|
||||
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Exists {
|
||||
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||
break
|
||||
}
|
||||
lowestUnknownSyncerChainHash = syncerChainHash
|
||||
}
|
||||
// No shared block, break
|
||||
if currentHighestKnownSyncerChainHash == nil {
|
||||
break
|
||||
}
|
||||
// No point in zooming further
|
||||
if len(locatorHashes) == 1 {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// Zoom in
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||
lowestUnknownSyncerChainHash,
|
||||
currentHighestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(locatorHashes) == 2 {
|
||||
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||
!locatorHashes[1].Equal(currentHighestKnownSyncerChainHash) {
|
||||
return protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||
"hashes to match the locatorHashes if len(locatorHashes) is 2")
|
||||
}
|
||||
// We found our search target
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
chainNegotiationRestartCounter++
|
||||
if chainNegotiationRestartCounter > 64 {
|
||||
return protocolerrors.Errorf(false,
|
||||
"Chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||
}
|
||||
|
||||
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||
// the queried hashes, so we restart the search
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
// Reset syncer's header selected tip
|
||||
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Found highest known syncer chain block %s from peer %s",
|
||||
highestKnownSyncerChainHash, flow.peer)
|
||||
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
|
||||
block, highestKnownSyncerChainHash)
|
||||
@ -176,7 +100,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
|
||||
if shouldDownloadHeadersProof {
|
||||
log.Infof("Starting IBD with headers proof")
|
||||
err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -227,6 +151,146 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
|
||||
/*
|
||||
Algorithm:
|
||||
Request full selected chain block locator from syncer
|
||||
Find the highest block which we know
|
||||
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||
*/
|
||||
|
||||
// Empty hashes indicate that the full chain is queried
|
||||
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
|
||||
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||
chainNegotiationRestartCounter := 0
|
||||
chainNegotiationZoomCounts := 0
|
||||
initialLocatorLen := len(locatorHashes)
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||
for _, syncerChainHash := range locatorHashes {
|
||||
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if info.Exists {
|
||||
if info.BlockStatus == externalapi.StatusInvalid {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
|
||||
}
|
||||
|
||||
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
|
||||
if err != nil {
|
||||
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
|
||||
}
|
||||
|
||||
// We're only interested in syncer chain blocks that have our pruning
|
||||
// point in their selected chain. Otherwise, it means one of the following:
|
||||
// 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality
|
||||
// (hence we can ignore it unless merged by others).
|
||||
// 2) syncerChainHash is actually in the past of our pruning point so there's no
|
||||
// point in syncing from it.
|
||||
if err == nil && isPruningPointOnSyncerChain {
|
||||
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||
break
|
||||
}
|
||||
}
|
||||
lowestUnknownSyncerChainHash = syncerChainHash
|
||||
}
|
||||
// No unknown blocks, break. Note this can only happen in the first iteration
|
||||
if lowestUnknownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// No shared block, break
|
||||
if currentHighestKnownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = nil
|
||||
break
|
||||
}
|
||||
// No point in zooming further
|
||||
if len(locatorHashes) == 1 {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// Zoom in
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||
lowestUnknownSyncerChainHash,
|
||||
currentHighestKnownSyncerChainHash, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) > 0 {
|
||||
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||
"hashes to match the locator bounds")
|
||||
}
|
||||
|
||||
chainNegotiationZoomCounts++
|
||||
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
if len(locatorHashes) == 2 {
|
||||
// We found our search target
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
|
||||
if chainNegotiationZoomCounts > initialLocatorLen*2 {
|
||||
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
|
||||
// expected to decrease in size at least every two iterations
|
||||
return nil, nil, protocolerrors.Errorf(true,
|
||||
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
|
||||
chainNegotiationZoomCounts, initialLocatorLen)
|
||||
}
|
||||
|
||||
} else { // Empty locator signals a restart due to chain changes
|
||||
chainNegotiationZoomCounts = 0
|
||||
chainNegotiationRestartCounter++
|
||||
if chainNegotiationRestartCounter > 32 {
|
||||
return nil, nil, protocolerrors.Errorf(false,
|
||||
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||
}
|
||||
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
|
||||
|
||||
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
initialLocatorLen = len(locatorHashes)
|
||||
// Reset syncer's header selected tip
|
||||
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Found highest known syncer chain block %s from peer %s",
|
||||
highestKnownSyncerChainHash, flow.peer)
|
||||
|
||||
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
@ -236,28 +300,38 @@ func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
|
||||
successString := "successfully"
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
if err != nil {
|
||||
successString = fmt.Sprintf("(interrupted: %s)", err)
|
||||
} else {
|
||||
successString = fmt.Sprintf("(interrupted)")
|
||||
}
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||
highHash, lowHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||
|
||||
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDChainBlockLocator:
|
||||
if len(message.BlockLocatorHashes) > 64 {
|
||||
return nil, protocolerrors.Errorf(true,
|
||||
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||
len(message.BlockLocatorHashes))
|
||||
}
|
||||
return message.BlockLocatorHashes, nil
|
||||
default:
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
@ -271,6 +345,11 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
|
||||
log.Infof("Downloading headers from %s", flow.peer)
|
||||
|
||||
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
|
||||
// No need to get syncer selected tip headers, so sync relay past and return
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
|
||||
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -318,55 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
select {
|
||||
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
// Finished downloading syncer selected tip blocks,
|
||||
// check if we already have the triggering relayBlockHash
|
||||
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
// Send a special header request for the selected tip anticone. This is expected to
|
||||
// be a small set, as it is bounded to the size of virtual's mergeset.
|
||||
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
_, anticoneDone, err = flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
for _, header := range anticoneHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the relayBlockHash has still not been received, the peer is misbehaving
|
||||
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
for _, header := range ibdBlocksMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
@ -383,6 +414,58 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
// Finished downloading syncer selected tip blocks,
|
||||
// check if we already have the triggering relayBlockHash
|
||||
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
// Send a special header request for the selected tip anticone. This is expected to
|
||||
// be a small set, as it is bounded to the size of virtual's mergeset.
|
||||
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
_, anticoneDone, err = flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
for _, header := range anticoneHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the relayBlockHash has still not been received, the peer is misbehaving
|
||||
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestAnticone(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
|
||||
@@ -433,7 +516,7 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
 		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
 		return nil
 	}
-	_, err = consensus.ValidateAndInsertBlock(block, false)
+	err = consensus.ValidateAndInsertBlock(block, false)
 	if err != nil {
 		if !errors.As(err, &ruleerrors.RuleError{}) {
 			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
@@ -511,7 +594,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
 
 		receivedChunkCount++
 		if receivedChunkCount%ibdBatchSize == 0 {
-			log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
+			log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
 				receivedChunkCount, receivedUTXOCount)
 
 			requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
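
The loop above acknowledges the sender once per batch of chunks. A generic restatement of that batching idea follows, as an illustration only; the function and constant names are assumptions and the real ibdBatchSize is defined elsewhere in the package.

// Sketch: acknowledge every ibdBatchSizeSketch chunks so the sender keeps streaming
// without flooding the route.
const ibdBatchSizeSketch = 99 // assumed example value

func receiveChunksSketch(recv func() (chunk []byte, done bool, err error), ackNext func() error) error {
	receivedChunkCount := 0
	for {
		_, done, err := recv()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		receivedChunkCount++
		if receivedChunkCount%ibdBatchSizeSketch == 0 {
			// Ask the peer for the next batch only after a full batch was consumed.
			if err := ackNext(); err != nil {
				return err
			}
		}
	}
}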
@@ -563,6 +646,12 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
 	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
 	highestProcessedDAAScore := lowBlockHeader.DAAScore()
 
+	// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
+	updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
+	if err != nil {
+		return err
+	}
+
 	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
 		var hashesToRequest []*externalapi.DomainHash
 		if offset+ibdBatchSize < len(hashes) {
@@ -599,7 +688,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
 				return err
 			}
 
-			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
+			err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
 			if err != nil {
 				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
 					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@@ -607,7 +696,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
 				}
 				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
 			}
-			err = flow.OnNewBlock(block, virtualChangeSet)
+			err = flow.OnNewBlock(block)
 			if err != nil {
 				return err
 			}
@@ -618,7 +707,15 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
 		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
 	}
 
-	return flow.resolveVirtual(highestProcessedDAAScore)
+	// We need to resolve virtual only if it wasn't updated while syncing block bodies
+	if !updateVirtual {
+		err := flow.resolveVirtual(highestProcessedDAAScore)
+		if err != nil {
+			return err
+		}
+	}
+
+	return flow.OnNewBlockTemplate()
 }
 
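
The decision above hinges on IsNearlySynced: when the remaining IBD is small, the virtual is updated per block; otherwise resolution is deferred to resolveVirtual. One plausible shape for such a predicate is sketched below; the real IsNearlySynced lives in the consensus package, and the timestamp heuristic and threshold here are assumptions for illustration only.

// Illustration only: treat the node as "nearly synced" if its virtual selected
// parent's timestamp is close to the present time.
func isNearlySyncedSketch(virtualSelectedParentTimeMillis, nowMillis int64) bool {
	const maxLagMillis = 10 * 60 * 1000 // assumed threshold (~10 minutes)
	return nowMillis-virtualSelectedParentTimeMillis < maxLagMillis
}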
@@ -631,38 +728,24 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
 }
 
 func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
-	virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
+	err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
+		var percents int
+		if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
+			percents = 100
+		} else {
+			percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
+		}
+		if percents < 0 {
+			percents = 0
+		} else if percents > 100 {
+			percents = 100
+		}
+		log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
+	})
 	if err != nil {
 		return err
 	}
 
-	for i := 0; ; i++ {
-		if i%10 == 0 {
-			virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
-			if err != nil {
-				return err
-			}
-			var percents int
-			if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
-				percents = 100
-			} else {
-				percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
-			}
-			log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
-		}
-		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
-		if err != nil {
-			return err
-		}
-
-		err = flow.OnVirtualChange(virtualChangeSet)
-		if err != nil {
-			return err
-		}
-
-		if isCompletelyResolved {
-			log.Infof("Resolved virtual")
-			return nil
-		}
-	}
+	log.Infof("Resolved virtual")
+	return nil
 }
 
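
The progress estimate inside the new callback is a simple clamped percentage. Restated as a standalone helper, purely for clarity (this helper is not code from the repository, only a sketch of the same arithmetic):

// progressPercent mirrors the percentage math used in the callback above.
func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100
	}
	percents := int(float64(current-start) / float64(target-start) * 100)
	if percents < 0 {
		return 0
	}
	if percents > 100 {
		return 100
	}
	return percents
}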
@@ -14,7 +14,7 @@ import (
 
 func (flow *handleIBDFlow) ibdWithHeadersProof(
 	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
-	err := flow.Domain().InitStagingConsensus()
+	err := flow.Domain().InitStagingConsensusWithoutGenesis()
 	if err != nil {
 		return err
 	}
@@ -25,7 +25,7 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(
 		return err
 	}
 
-		log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
+		log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
 		deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
 		if deleteStagingConsensusErr != nil {
 			return deleteStagingConsensusErr
@ -55,7 +55,12 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
|
||||
|
||||
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
|
||||
if highestKnownSyncerChainHash != nil {
|
||||
highestSharedBlockFound = true
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
highestSharedBlockFound = blockInfo.HasBody()
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
@ -80,28 +85,33 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
if highestKnownSyncerChainHash == nil {
|
||||
log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
|
||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
|
||||
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
||||
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||
@ -280,8 +290,14 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
|
||||
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
||||
}
|
||||
|
||||
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
||||
return err
|
||||
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
||||
if err != nil {
|
||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
||||
|
@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
|
||||
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
|
||||
OnTransactionAddedToMempool()
|
||||
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
|
||||
IsIBDRunning() bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type handleRelayedTransactionsFlow struct {
|
||||
@ -50,7 +50,12 @@ func (flow *handleRelayedTransactionsFlow) start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.IsIBDRunning() {
|
||||
isNearlySynced, err := flow.IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Transaction relay is disabled if the node is out of sync and thus not mining
|
||||
if !isNearlySynced {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -97,7 +102,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
|
||||
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
|
||||
// Ask the transaction memory pool if the transaction is known
|
||||
// to it in any form (main pool or orphan).
|
||||
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
|
||||
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
|
||||
return true
|
||||
}
|
||||
|
||||
|
@@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
 func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
 }
 
-func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
-	return false
+func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
+	return true, nil
 }
 
 // TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
|
@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
||||
}
|
||||
|
||||
for _, transactionID := range msgRequestTransactions.IDs {
|
||||
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
|
||||
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
|
||||
|
||||
if !ok {
|
||||
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
|
||||
@ -40,7 +40,6 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
|
||||
if err != nil {
|
||||
return err
|
||||
|
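
The call sites above now pass two boolean selectors to the mining manager's GetTransaction and receive an extra return value. A reduced sketch of that calling convention is below; the interface and the flag names (which pool to search) are assumptions inferred from how the call sites read, not the repository's declared API.

// Illustration only: a simplified stand-in for the mining manager's GetTransaction.
type transactionGetter interface {
	GetTransaction(txID *externalapi.DomainTransactionID, includeTransactionPool, includeOrphanPool bool) (
		*externalapi.DomainTransaction, bool, bool)
}

func lookupForRelaySketch(mempool transactionGetter, txID *externalapi.DomainTransactionID) bool {
	// Relay only cares whether the transaction is known in either pool.
	_, _, found := mempool.GetTransaction(txID, true, true)
	return found
}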
@ -2,10 +2,11 @@ package protocol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
@ -90,14 +91,9 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
|
||||
return <-errChan
|
||||
}
|
||||
|
||||
// SetOnVirtualChange sets the onVirtualChangeHandler handler
|
||||
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
|
||||
m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
|
||||
}
|
||||
|
||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
||||
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
|
||||
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
|
||||
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
|
||||
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
|
||||
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
|
||||
}
|
||||
|
||||
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
||||
@ -110,12 +106,6 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
|
||||
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
|
||||
}
|
||||
|
||||
// ShouldMine returns whether it's ok to use block template from this node
|
||||
// for mining purposes.
|
||||
func (m *Manager) ShouldMine() (bool, error) {
|
||||
return m.context.ShouldMine()
|
||||
}
|
||||
|
||||
// IsIBDRunning returns true if IBD is currently marked as running
|
||||
func (m *Manager) IsIBDRunning() bool {
|
||||
return m.context.IsIBDRunning()
|
||||
|
@ -3,8 +3,7 @@ package protocol
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
|
||||
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
|
||||
v5 "github.com/kaspanet/kaspad/app/protocol/flows/v5"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
@ -24,7 +23,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
|
||||
// They are both initialized here and passed to register flows.
|
||||
isStopping := uint32(0)
|
||||
errChan := make(chan error)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
|
||||
|
||||
@ -77,8 +76,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
var flows []*common.Flow
|
||||
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
|
||||
switch peer.ProtocolVersion() {
|
||||
case 4:
|
||||
flows = v4.Register(m, router, errChan, &isStopping)
|
||||
case 5:
|
||||
flows = v5.Register(m, router, errChan, &isStopping)
|
||||
default:
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Manager is an RPC manager
|
||||
@ -28,6 +29,7 @@ func NewManager(
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
utxoIndex *utxoindex.UTXOIndex,
|
||||
consensusEventsChan chan externalapi.ConsensusEvent,
|
||||
shutDownChan chan<- struct{}) *Manager {
|
||||
|
||||
manager := Manager{
|
||||
@ -44,50 +46,90 @@ func NewManager(
|
||||
}
|
||||
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
|
||||
|
||||
manager.initConsensusEventsHandler(consensusEventsChan)
|
||||
|
||||
return &manager
|
||||
}
|
||||
|
||||
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
||||
func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) {
|
||||
spawn("consensusEventsHandler", func() {
|
||||
for {
|
||||
consensusEvent, ok := <-consensusEventsChan
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
switch event := consensusEvent.(type) {
|
||||
case *externalapi.VirtualChangeSet:
|
||||
err := m.notifyVirtualChange(event)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
case *externalapi.BlockAdded:
|
||||
err := m.notifyBlockAddedToDAG(event.Block)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
default:
|
||||
panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
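
initConsensusEventsHandler above fans consensus events out to the RPC notifiers by switching on the event's concrete type. The same dispatch pattern is sketched below with a simplified local event type; this is an illustration only and not the externalapi.ConsensusEvent interface itself.

// Simplified sketch of the dispatch loop above.
type consensusEventSketch interface{ isConsensusEvent() }

type blockAddedSketch struct{ hash string }
type virtualChangedSketch struct{ daaScore uint64 }

func (blockAddedSketch) isConsensusEvent()     {}
func (virtualChangedSketch) isConsensusEvent() {}

func handleEventsSketch(events <-chan consensusEventSketch) {
	for event := range events {
		switch e := event.(type) {
		case blockAddedSketch:
			// notify block-added listeners
			_ = e.hash
		case virtualChangedSketch:
			// notify virtual-change listeners
			_ = e.daaScore
		}
	}
}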
// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||
func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
|
||||
defer onEnd()
|
||||
|
||||
err := m.NotifyVirtualChange(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
// Before converting the block and populating it, we check if any listeners are interested.
|
||||
// This is done since most nodes do not use this event.
|
||||
if !m.context.NotificationManager.HasBlockAddedListeners() {
|
||||
return nil
|
||||
}
|
||||
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
|
||||
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
|
||||
err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
|
||||
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
||||
// notifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||
func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
|
||||
defer onEnd()
|
||||
|
||||
if m.context.Config.UTXOIndex {
|
||||
if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil {
|
||||
err := m.notifyUTXOsChanged(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := m.notifyVirtualSelectedParentBlueScoreChanged()
|
||||
err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = m.notifyVirtualDaaScoreChanged()
|
||||
err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
|
||||
(len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
|
||||
len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -96,6 +138,13 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the manager that a new
|
||||
// block template is available for miners
|
||||
func (m *Manager) NotifyNewBlockTemplate() error {
|
||||
notification := appmessage.NewNewBlockTemplateNotificationMessage()
|
||||
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
||||
// resets due to pruning point change via IBD.
|
||||
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
||||
@ -138,6 +187,7 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
|
||||
}
|
||||
|
||||
@ -153,33 +203,18 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
|
||||
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
|
||||
}
|
||||
|
||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
|
||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
|
||||
defer onEnd()
|
||||
|
||||
virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
|
||||
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore)
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
|
||||
}
|
||||
|
||||
func (m *Manager) notifyVirtualDaaScoreChanged() error {
|
||||
func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
|
||||
defer onEnd()
|
||||
|
||||
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
|
||||
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
|
||||
}
|
||||
@ -188,10 +223,16 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
|
||||
defer onEnd()
|
||||
|
||||
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
virtualChangeSet.VirtualSelectedParentChainChanges)
|
||||
if err != nil {
|
||||
return err
|
||||
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
|
||||
|
||||
if hasListeners {
|
||||
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
||||
}
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@@ -48,6 +48,9 @@ var handlers = map[appmessage.MessageCommand]handler{
 	appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
 	appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage:           rpchandlers.HandleEstimateNetworkHashesPerSecond,
 	appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage:             rpchandlers.HandleNotifyVirtualDaaScoreChanged,
+	appmessage.CmdNotifyNewBlockTemplateRequestMessage:                   rpchandlers.HandleNotifyNewBlockTemplate,
+	appmessage.CmdGetCoinSupplyRequestMessage:                            rpchandlers.HandleGetCoinSupply,
+	appmessage.CmdGetMempoolEntriesByAddressesRequestMessage:             rpchandlers.HandleGetMempoolEntriesByAddresses,
 }
 
 func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
@ -3,12 +3,14 @@ package rpccontext
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
)
|
||||
|
||||
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
|
||||
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
|
||||
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
||||
selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) (
|
||||
*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
||||
|
||||
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
|
||||
for i, removed := range selectedParentChainChanges.Removed {
|
||||
@ -20,5 +22,58 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
|
||||
addedChainBlocks[i] = added.String()
|
||||
}
|
||||
|
||||
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
|
||||
var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs
|
||||
if includeAcceptedTransactionIDs {
|
||||
var err error
|
||||
acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
|
||||
removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
|
||||
}
|
||||
|
||||
func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
|
||||
[]*appmessage.AcceptedTransactionIDs, error) {
|
||||
|
||||
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
|
||||
|
||||
const chunk = 1000
|
||||
position := 0
|
||||
|
||||
for position < len(selectedParentChainChanges.Added) {
|
||||
var chainBlocksChunk []*externalapi.DomainHash
|
||||
if position+chunk > len(selectedParentChainChanges.Added) {
|
||||
chainBlocksChunk = selectedParentChainChanges.Added[position:]
|
||||
} else {
|
||||
chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
|
||||
}
|
||||
// We use chunks in order to avoid blocking consensus for too long
|
||||
chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, addedChainBlock := range chainBlocksChunk {
|
||||
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
|
||||
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
|
||||
AcceptingBlockHash: addedChainBlock.String(),
|
||||
AcceptedTransactionIDs: nil,
|
||||
}
|
||||
for _, blockAcceptanceData := range chainBlockAcceptanceData {
|
||||
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
|
||||
if transactionAcceptanceData.IsAccepted {
|
||||
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
|
||||
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
|
||||
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
position += chunk
|
||||
}
|
||||
|
||||
return acceptedTransactionIDs, nil
|
||||
}
|
||||
|
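
getAndConvertAcceptedTransactionIDs above walks the added chain blocks in fixed-size windows so each GetBlocksAcceptanceData call stays short and consensus is not blocked for long. The window iteration, restated generically as a sketch (not repository code):

// forEachChunkSketch processes indices [0, length) in windows of chunkSize.
func forEachChunkSketch(length, chunkSize int, process func(start, end int) error) error {
	for position := 0; position < length; position += chunkSize {
		end := position + chunkSize
		if end > length {
			end = length
		}
		if err := process(position, end); err != nil {
			return err
		}
	}
	return nil
}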
@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
|
||||
UTXOIndex: utxoIndex,
|
||||
ShutDownChan: shutDownChan,
|
||||
}
|
||||
context.NotificationManager = NewNotificationManager()
|
||||
context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams)
|
||||
|
||||
return context
|
||||
}
|
||||
|
@ -3,6 +3,11 @@ package rpccontext
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/utxoindex"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
@ -13,6 +18,7 @@ import (
|
||||
type NotificationManager struct {
|
||||
sync.RWMutex
|
||||
listeners map[*routerpkg.Router]*NotificationListener
|
||||
params *dagconfig.Params
|
||||
}
|
||||
|
||||
// UTXOsChangedNotificationAddress represents a kaspad address.
|
||||
@ -24,6 +30,8 @@ type UTXOsChangedNotificationAddress struct {
|
||||
|
||||
// NotificationListener represents a registered RPC notification listener
|
||||
type NotificationListener struct {
|
||||
params *dagconfig.Params
|
||||
|
||||
propagateBlockAddedNotifications bool
|
||||
propagateVirtualSelectedParentChainChangedNotifications bool
|
||||
propagateFinalityConflictNotifications bool
|
||||
@ -32,13 +40,16 @@ type NotificationListener struct {
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
||||
propagateVirtualDaaScoreChangedNotifications bool
|
||||
propagatePruningPointUTXOSetOverrideNotifications bool
|
||||
propagateNewBlockTemplateNotifications bool
|
||||
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
|
||||
}
|
||||
|
||||
// NewNotificationManager creates a new NotificationManager
|
||||
func NewNotificationManager() *NotificationManager {
|
||||
func NewNotificationManager(params *dagconfig.Params) *NotificationManager {
|
||||
return &NotificationManager{
|
||||
params: params,
|
||||
listeners: make(map[*routerpkg.Router]*NotificationListener),
|
||||
}
|
||||
}
|
||||
@ -48,7 +59,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
listener := newNotificationListener()
|
||||
listener := newNotificationListener(nm.params)
|
||||
nm.listeners[router] = listener
|
||||
}
|
||||
|
||||
@ -72,6 +83,19 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
|
||||
func (nm *NotificationManager) HasBlockAddedListeners() bool {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for _, listener := range nm.listeners {
|
||||
if listener.propagateBlockAddedNotifications {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
|
||||
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
|
||||
nm.RLock()
|
||||
@ -79,10 +103,8 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateBlockAddedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||
log.Warnf("Couldn't send notification: %s", err)
|
||||
} else if err != nil {
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
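
Several notifiers in this file switch from Enqueue to MaybeEnqueue, which reads as a best-effort send that no longer fails the notifier on a slow or closed listener route. The router's actual MaybeEnqueue is not reproduced here; the snippet below is only an assumption-labeled sketch of that idea over a buffered channel.

// maybeEnqueueSketch: best-effort enqueue that drops the notification instead of
// blocking or erroring when the listener's queue is full. Illustration only.
func maybeEnqueueSketch(queue chan<- interface{}, notification interface{}) error {
	select {
	case queue <- notification:
		return nil
	default:
		// Queue is full; drop rather than stall the notification loop.
		return nil
	}
}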
@ -91,13 +113,27 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
||||
}
|
||||
|
||||
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
|
||||
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
||||
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
|
||||
notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
|
||||
RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
|
||||
AddedChainBlockHashes: notification.AddedChainBlockHashes,
|
||||
}
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
var err error
|
||||
|
||||
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
} else {
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -106,6 +142,31 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notificat
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
|
||||
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
|
||||
// to include AcceptedTransactionIDs.
|
||||
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
hasListeners = false
|
||||
hasListenersThatRequireAcceptedTransactionIDs = false
|
||||
|
||||
for _, listener := range nm.listeners {
|
||||
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
||||
hasListeners = true
|
||||
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
|
||||
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
|
||||
hasListenersThatRequireAcceptedTransactionIDs = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
|
||||
}
|
||||
|
||||
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
|
||||
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
|
||||
nm.RLock()
|
||||
@ -146,7 +207,10 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateUTXOsChangedNotifications {
|
||||
// Filter utxoChanges and create a notification
|
||||
notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
||||
notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Don't send the notification if it's empty
|
||||
if len(notification.Added) == 0 && len(notification.Removed) == 0 {
|
||||
@ -154,7 +218,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
||||
}
|
||||
|
||||
// Enqueue the notification
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -173,7 +237,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -192,6 +256,25 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualDaaScoreChangedNotifications {
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the notification manager that a new
|
||||
// block template is available for miners
|
||||
func (nm *NotificationManager) NotifyNewBlockTemplate(
|
||||
notification *appmessage.NewBlockTemplateNotificationMessage) error {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateNewBlockTemplateNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -218,18 +301,27 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNotificationListener() *NotificationListener {
|
||||
func newNotificationListener(params *dagconfig.Params) *NotificationListener {
|
||||
return &NotificationListener{
|
||||
params: params,
|
||||
|
||||
propagateBlockAddedNotifications: false,
|
||||
propagateVirtualSelectedParentChainChangedNotifications: false,
|
||||
propagateFinalityConflictNotifications: false,
|
||||
propagateFinalityConflictResolvedNotifications: false,
|
||||
propagateUTXOsChangedNotifications: false,
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
||||
propagateNewBlockTemplateNotifications: false,
|
||||
propagatePruningPointUTXOSetOverrideNotifications: false,
|
||||
}
|
||||
}
|
||||
|
||||
// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
|
||||
// includes accepted transaction IDs in it's virtual-selected-parent-chain-changed notifications
|
||||
func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
|
||||
return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
|
||||
}
|
||||
|
||||
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||
@ -238,8 +330,9 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||
|
||||
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
|
||||
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) {
|
||||
nl.propagateVirtualSelectedParentChainChangedNotifications = true
|
||||
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
|
||||
}
|
||||
|
||||
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
|
||||
@ -258,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
|
||||
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
|
||||
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
|
||||
// are ignored.
|
||||
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
||||
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||
// Apply a write-lock since the internal listener address map is modified
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
if !nl.propagateUTXOsChangedNotifications {
|
||||
nl.propagateUTXOsChangedNotifications = true
|
||||
nl.propagateUTXOsChangedNotificationAddresses =
|
||||
@ -273,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
|
||||
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
|
||||
// changed notifications to the remote listener for the given addresses. Addresses for which
|
||||
// notifications are not currently sent are ignored.
|
||||
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
||||
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||
// Apply a write-lock since the internal listener address map is modified
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
if !nl.propagateUTXOsChangedNotifications {
|
||||
return
|
||||
}
|
||||
@ -284,7 +385,7 @@ func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(address
|
||||
}
|
||||
|
||||
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
|
||||
utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) {
|
||||
|
||||
// As an optimization, we iterate over the smaller set (O(n)) among the two below
|
||||
// and check existence over the larger set (O(1))
|
||||
@ -299,27 +400,64 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
|
||||
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
|
||||
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
} else if addressesSize > 0 {
|
||||
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
||||
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
||||
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
|
||||
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
for scriptPublicKeyString, removedPAirs := range utxoChanges.Removed {
|
||||
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPAirs)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
|
||||
return notification
|
||||
return notification, nil
|
||||
}
|
||||
|
||||
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
|
||||
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
|
||||
|
||||
// ignore error because it is often returned when the script is of unknown type
|
||||
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var addressString string
|
||||
if scriptType == txscript.NonStandardTy {
|
||||
addressString = ""
|
||||
} else {
|
||||
addressString = address.String()
|
||||
}
|
||||
return addressString, nil
|
||||
}
|
||||
|
||||
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
|
||||
@ -334,6 +472,12 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
|
||||
nl.propagateVirtualDaaScoreChangedNotifications = true
|
||||
}
|
||||
|
||||
// PropagateNewBlockTemplateNotifications instructs the listener to send
|
||||
// new block template notifications to the remote listener
|
||||
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
|
||||
nl.propagateNewBlockTemplateNotifications = true
|
||||
}
|
||||
|
||||
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
||||
// to the remote listener.
|
||||
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
||||
|
@ -32,22 +32,6 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
|
||||
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
|
||||
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
|
||||
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
|
||||
for outpoint := range outpoints {
|
||||
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
|
||||
Address: address,
|
||||
Outpoint: &appmessage.RPCOutpoint{
|
||||
TransactionID: outpoint.TransactionID.String(),
|
||||
Index: outpoint.Index,
|
||||
},
|
||||
})
|
||||
}
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
|
||||
// to UTXOsChangedNotificationAddresses
|
||||
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||
@ -63,7 +47,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
}
|
||||
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
|
||||
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
|
||||
addresses[i] = &UTXOsChangedNotificationAddress{
|
||||
Address: addressString,
|
||||
ScriptPublicKeyString: scriptPublicKeyString,
|
||||
|
@ -81,10 +81,6 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
||||
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the block if we didn't receive it previously
|
||||
if domainBlock == nil {
|
||||
domainBlock, err = ctx.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash)
|
||||
@ -93,6 +89,10 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
||||
}
|
||||
}
|
||||
|
||||
if len(domainBlock.Transactions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
transactionIDs := make([]string, len(domainBlock.Transactions))
|
||||
for i, transaction := range domainBlock.Transactions {
|
||||
transactionIDs[i] = consensushashing.TransactionID(transaction).String()
|
||||
@ -122,6 +122,7 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
|
||||
}
|
||||
|
||||
ctx.Domain.Consensus().PopulateMass(domainTransaction)
|
||||
|
||||
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
|
||||
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
|
||||
Hash: consensushashing.TransactionHash(domainTransaction).String(),
|
||||
|
@ -9,6 +9,14 @@ import (
|
||||
|
||||
// HandleAddPeer handles the respectively named RPC command
|
||||
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
if context.Config.SafeRPC {
|
||||
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
|
||||
response := appmessage.NewAddPeerResponseMessage()
|
||||
response.Error =
|
||||
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
|
||||
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
|
||||
if err != nil {
|
||||
|
@ -9,6 +9,14 @@ import (
|
||||
|
||||
// HandleBan handles the respectively named RPC command
|
||||
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
if context.Config.SafeRPC {
|
||||
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
|
||||
response := appmessage.NewBanResponseMessage()
|
||||
response.Error =
|
||||
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
banRequest := request.(*appmessage.BanRequestMessage)
|
||||
ip := net.ParseIP(banRequest.IP)
|
||||
if ip == nil {
|
||||
|
@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
|
||||
}
|
||||
}
|
||||
|
||||
if context.Config.SafeRPC {
|
||||
const windowSizeLimit = 10000
|
||||
if windowSize > windowSizeLimit {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
response.Error =
|
||||
appmessage.RPCErrorf(
|
||||
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
|
||||
windowSize, windowSizeLimit)
|
||||
return response, nil
|
||||
}
|
||||
}
|
||||
|
||||
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
response.Error =
|
||||
appmessage.RPCErrorf(
|
||||
"Requested window size %d is larger than pruning point depth %d",
|
||||
windowSize, context.Config.ActiveNetParams.PruningDepth())
|
||||
return response, nil
|
||||
}
|
||||
|
||||
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
|
||||
if err != nil {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
|
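
The safe-RPC checks added to HandleAddPeer, HandleBan, and HandleEstimateNetworkHashesPerSecond all follow the same reject-and-log shape. A hypothetical helper capturing that pattern is sketched below; it does not exist in the repository and the names are assumptions.

// rejectIfSafeRPC returns a populated RPC error when the node runs in safe RPC mode,
// or nil when the command may proceed. Illustration only.
func rejectIfSafeRPC(safeRPC bool, command string) *appmessage.RPCError {
	if !safeRPC {
		return nil
	}
	log.Warnf("%s RPC command called while node in safe RPC mode -- ignoring.", command)
	return appmessage.RPCErrorf("%s RPC command called while node in safe RPC mode", command)
}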
@@ -22,7 +22,7 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
 	balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
 	if err != nil {
 		rpcError := &appmessage.RPCError{}
-		if !errors.As(err, rpcError) {
+		if !errors.As(err, &rpcError) {
 			return nil, err
 		}
 		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}

@@ -23,7 +23,7 @@ func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router,
 
 	if err != nil {
 		rpcError := &appmessage.RPCError{}
-		if !errors.As(err, rpcError) {
+		if !errors.As(err, &rpcError) {
 			return nil, err
 		}
 		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
@@ -4,6 +4,7 @@ import (
     "github.com/kaspanet/kaspad/app/appmessage"
     "github.com/kaspanet/kaspad/app/rpc/rpccontext"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+    "github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
     "github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "github.com/kaspanet/kaspad/util"
@@ -16,7 +17,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
 
     payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
     if err != nil {
-        errorMessage := &appmessage.GetBlockResponseMessage{}
+        errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
         errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
         return errorMessage, nil
     }
@@ -26,18 +27,20 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
         return nil, err
     }
 
-    coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version())}
+    coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
 
-    templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
+    templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
     if err != nil {
         return nil, err
     }
 
+    if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
+        errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
+        errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
+        return errorMessage, nil
+    }
+
     rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
 
-    isSynced, err := context.ProtocolManager.ShouldMine()
-    if err != nil {
-        return nil, err
-    }
-
-    return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
+    return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
 }
@@ -37,7 +37,7 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
         return nil, err
     }
 
-    if !blockInfo.Exists {
+    if !blockInfo.HasHeader() {
         return &appmessage.GetBlocksResponseMessage{
             Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
         }, nil
@@ -23,6 +23,10 @@ type fakeDomain struct {
     testapi.TestConsensus
 }
 
+func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent {
+    panic("implement me")
+}
+
 func (d fakeDomain) DeleteStagingConsensus() error {
     panic("implement me")
 }
@@ -31,7 +35,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
     panic("implement me")
 }
 
-func (d fakeDomain) InitStagingConsensus() error {
+func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
     panic("implement me")
 }
app/rpc/rpchandlers/get_coin_supply.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package rpchandlers

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetCoinSupply handles the respectively named RPC command
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
    if !context.Config.UTXOIndex {
        errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
        errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
        return errorMessage, nil
    }

    circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
    if err != nil {
        return nil, err
    }

    response := appmessage.NewGetCoinSupplyResponseMessage(
        constants.MaxSompi,
        circulatingSompiSupply,
    )

    return response, nil
}
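The new handler reports two amounts in sompi, the base unit: the maximum supply (constants.MaxSompi) and the circulating supply taken from the UTXO index, which is why it refuses to run without --utxoindex. A hedged conversion sketch; constants.SompiPerKaspa is assumed here to be the sompi-per-KAS ratio defined alongside MaxSompi and is not shown in this diff:

    // Illustrative conversion from the response's sompi amounts to whole KAS.
    // Assumes constants.SompiPerKaspa exists next to constants.MaxSompi.
    func sompiToKAS(sompi uint64) float64 {
        return float64(sompi) / float64(constants.SompiPerKaspa)
    }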
@@ -9,10 +9,17 @@ import (
 // HandleGetInfo handles the respectively named RPC command
 func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+    isNearlySynced, err := context.Domain.Consensus().IsNearlySynced()
+    if err != nil {
+        return nil, err
+    }
+
     response := appmessage.NewGetInfoResponseMessage(
         context.NetAdapter.ID().String(),
-        uint64(context.Domain.MiningManager().TransactionCount()),
+        uint64(context.Domain.MiningManager().TransactionCount(true, false)),
         version.Version(),
         context.Config.UTXOIndex,
+        context.ProtocolManager.Context().HasPeers() && isNearlySynced,
     )
 
     return response, nil
@@ -7,19 +7,40 @@ import (
 )
 
 // HandleGetMempoolEntries handles the respectively named RPC command
-func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
-    transactions := context.Domain.MiningManager().AllTransactions()
-    entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
-    for _, transaction := range transactions {
-        rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
-        err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
-        if err != nil {
-            return nil, err
-        }
-        entries = append(entries, &appmessage.MempoolEntry{
-            Fee:         transaction.Fee,
-            Transaction: rpcTransaction,
-        })
-    }
+func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage)
+
+    entries := make([]*appmessage.MempoolEntry, 0)
+
+    transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)
+
+    if !getMempoolEntriesRequest.FilterTransactionPool {
+        for _, transaction := range transactionPoolTransactions {
+            rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+            err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
+            if err != nil {
+                return nil, err
+            }
+            entries = append(entries, &appmessage.MempoolEntry{
+                Fee:         transaction.Fee,
+                Transaction: rpcTransaction,
+                IsOrphan:    false,
+            })
+        }
+    }
+    if getMempoolEntriesRequest.IncludeOrphanPool {
+        for _, transaction := range orphanPoolTransactions {
+            rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+            err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
+            if err != nil {
+                return nil, err
+            }
+            entries = append(entries, &appmessage.MempoolEntry{
+                Fee:         transaction.Fee,
+                Transaction: rpcTransaction,
+                IsOrphan:    true,
+            })
+        }
+    }
 
     return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
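The handler now reads two flags from the request instead of always dumping the transaction pool: FilterTransactionPool excludes the regular pool, and IncludeOrphanPool adds orphan entries, each entry tagged with IsOrphan. A sketch of a request asking for both pools; the struct and field names mirror the handler above, while how the message is sent to the node is out of scope here:

    // Both pools: keep transaction-pool entries and also return orphan-pool entries.
    var exampleGetMempoolEntriesRequest = &appmessage.GetMempoolEntriesRequestMessage{
        FilterTransactionPool: false,
        IncludeOrphanPool:     true,
    }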
app/rpc/rpchandlers/get_mempool_entries_by_addresses.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package rpchandlers

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/domain/consensus/utils/txscript"

    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "github.com/kaspanet/kaspad/util"
)

// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
    getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)

    mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)

    sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
    if err != nil {
        return nil, err
    }

    for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {
        address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
        if err != nil {
            errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
            errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
            return errorMessage, nil
        }

        sending := make([]*appmessage.MempoolEntry, 0)
        receiving := make([]*appmessage.MempoolEntry, 0)

        scriptPublicKey, err := txscript.PayToAddrScript(address)
        if err != nil {
            errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
            errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
            return errorMessage, nil
        }

        if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {
            if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                sending = append(sending, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    false,
                },
                )
            }

            if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                receiving = append(receiving, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    false,
                },
                )
            }
        }
        if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {
            if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                sending = append(sending, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    true,
                },
                )
            }

            if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                receiving = append(receiving, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    true,
                },
                )
            }
        }

        if len(sending) > 0 || len(receiving) > 0 {
            mempoolEntriesByAddresses = append(
                mempoolEntriesByAddresses,
                &appmessage.MempoolEntryByAddress{
                    Address:   address.String(),
                    Sending:   sending,
                    Receiving: receiving,
                },
            )
        }
    }

    return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil
}
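Note how the four maps returned by GetTransactionsByAddresses are keyed: the handler converts each requested address to its scriptPublicKey and uses scriptPublicKey.String() as the lookup key, so sending and receiving entries are matched per script rather than per address string. A small sketch of that key derivation using the same util and txscript helpers the file imports; the function name and the prefix parameter type are illustrative assumptions:

    // Illustrative helper: derive the map key the handler uses for one address.
    func mempoolKeyForAddress(addressString string, prefix util.Bech32Prefix) (string, error) {
        address, err := util.DecodeAddress(addressString, prefix)
        if err != nil {
            return "", err
        }
        scriptPublicKey, err := txscript.PayToAddrScript(address)
        if err != nil {
            return "", err
        }
        return scriptPublicKey.String(), nil
    }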
@@ -3,12 +3,18 @@ package rpchandlers
 import (
     "github.com/kaspanet/kaspad/app/appmessage"
     "github.com/kaspanet/kaspad/app/rpc/rpccontext"
+    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )
 
 // HandleGetMempoolEntry handles the respectively named RPC command
 func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    transaction := &externalapi.DomainTransaction{}
+    var found bool
+    var isOrphan bool
+
     getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)
 
     transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)
@@ -18,17 +24,18 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
         return errorMessage, nil
     }
 
-    transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
-    if !ok {
+    mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool)
+
+    if !found {
         errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
         errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
         return errorMessage, nil
     }
-    rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+    rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
     err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
     if err != nil {
         return nil, err
     }
 
-    return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
+    return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
 }
@@ -26,12 +26,14 @@ func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _
         return response, nil
     }
 
-    chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(virtualSelectedParentChain)
+    chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
+        virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs)
     if err != nil {
         return nil, err
     }
 
     response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
-        chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes)
+        chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes,
+        chainChangedNotification.AcceptedTransactionIDs)
     return response, nil
 }
app/rpc/rpchandlers/notify_new_block_template.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package rpchandlers

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
    listener, err := context.NotificationManager.Listener(router)
    if err != nil {
        return nil, err
    }
    listener.PropagateNewBlockTemplateNotifications()

    response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
    return response, nil
}
@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
     if err != nil {
         return nil, err
     }
-    listener.PropagateUTXOsChangedNotifications(addresses)
+    context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)
 
     response := appmessage.NewNotifyUTXOsChangedResponseMessage()
     return response, nil
@@ -7,12 +7,17 @@ import (
 )
 
 // HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command
-func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router,
+    request appmessage.Message) (appmessage.Message, error) {
+
+    notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage)
+
     listener, err := context.NotificationManager.Listener(router)
     if err != nil {
         return nil, err
     }
-    listener.PropagateVirtualSelectedParentChainChangedNotifications()
+    listener.PropagateVirtualSelectedParentChainChangedNotifications(
+        notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs)
 
     response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage()
     return response, nil
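The subscription request now carries a flag, so a client opts in to receiving accepted transaction IDs with each chain-changed notification. A sketch of such a request, using the field name read by the handler above; how the message reaches the node is not shown:

    // Ask for accepted transaction IDs to be included in every notification.
    var exampleChainChangedSubscription = &appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage{
        IncludeAcceptedTransactionIDs: true,
    }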
@@ -8,6 +8,14 @@ import (
 // HandleResolveFinalityConflict handles the respectively named RPC command
 func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
+        response := &appmessage.ResolveFinalityConflictResponseMessage{}
+        response.Error =
+            appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     response := &appmessage.ResolveFinalityConflictResponseMessage{}
     response.Error = appmessage.RPCErrorf("not implemented")
     return response, nil
@@ -12,6 +12,14 @@ const pauseBeforeShutDown = time.Second
 // HandleShutDown handles the respectively named RPC command
 func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
+        response := appmessage.NewShutDownResponseMessage()
+        response.Error =
+            appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     log.Warn("ShutDown RPC called.")
 
     // Wait a second before shutting down, to allow time to return the response to the caller
Some files were not shown because too many files have changed in this diff.