Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-09-13 21:10:12 +00:00

Compare commits: v0.11.13-r ... master (168 commits)
| SHA1 |
| --- |
| 4bb5bf25d3 |
| 25c2dd8670 |
| c93100ccd0 |
| 03cc7dfc19 |
| ed745a9acb |
| c23c1d141c |
| 352d261fd6 |
| 43b9523919 |
| 6085d1fc84 |
| 1e9ddc42d0 |
| 48a142e12f |
| 86b89065cf |
| f41dc7fa0b |
| 6b38bf7069 |
| d2453f8e7b |
| 629faa8436 |
| 91e6c6b74b |
| 0819244ba1 |
| a0149cd8d0 |
| 5a3b8a0066 |
| 8e71f79f98 |
| 346341a709 |
| 8c881aea39 |
| 40ec440dcf |
| 88bdcb43bc |
| 9d1e44673f |
| 387fade044 |
| c417c8b525 |
| bd1420220a |
| 5640ec4020 |
| 1c0887ca60 |
| 7be3f41aa7 |
| 26c4c73624 |
| 880d917e58 |
| 3c53c6d8cd |
| 3c4b973090 |
| 8aee8f81c5 |
| ec3441e63f |
| e3ba1ca07e |
| 27fdbd9c88 |
| 377d9aaaeb |
| beee947dda |
| d4a27bf1c1 |
| eec6eb9669 |
| d5c10832c2 |
| 9fbfba17b6 |
| 09d698dd0e |
| ec51c6926a |
| 7d44275eb1 |
| a3387a56b3 |
| c2ae03fc89 |
| 6c774c966b |
| 2d54c9693b |
| d8350d62b0 |
| 26c7db251f |
| 4d435f2b3a |
| 067688f549 |
| 3a3fa0d3f0 |
| cf4073b773 |
| 6a5e7c9e3f |
| 7e9b5b9010 |
| 953838e0d8 |
| a1dcb34c29 |
| 23764e1b0b |
| 0838cc8e32 |
| 9f51330f38 |
| f6d46fd23f |
| 2a7e03e232 |
| 3286a7d010 |
| aabbc741d7 |
| 20b7ab89f9 |
| 10f1e7e3f4 |
| d941c73701 |
| 3f80638c86 |
| 266ec6c270 |
| 9ee409afaa |
| 715cb3b1ac |
| eb693c4a86 |
| 7a61c637b0 |
| c7bd84ef9d |
| b26b9f6c4b |
| 1c9bb54cc2 |
| b9093d59eb |
| 18d000f625 |
| c5aade7e7f |
| d4b741fd7c |
| 74a4f927e9 |
| 847aafc91f |
| c87e541570 |
| 2ea1c4f922 |
| 5e9c28b77b |
| d957a6d93a |
| b2648aa5bd |
| 3908f274ae |
| fa7ea121ff |
| 24848da895 |
| b200b77541 |
| d50ad0667c |
| 5cea285960 |
| 7eb5085f6b |
| 491e3569d2 |
| 440aea19b0 |
| 968d47c3e6 |
| 052193865e |
| 85febcb551 |
| a4d9fa10bf |
| cd5fd86ad3 |
| b84d6fed2c |
| 24c94b38be |
| 4dd7113dc5 |
| 48c7fa0104 |
| 4d0cf2169a |
| 5f7cc079e9 |
| 016ddfdfce |
| 5d24e2afbc |
| 8735da045f |
| c839337425 |
| 7390651072 |
| 52fbeedf20 |
| 1660cf0cf1 |
| 2b5202be7a |
| 9ffbb15160 |
| 540b0d3a22 |
| 8d5faee53a |
| 6e2fd0633b |
| beb038c815 |
| 35a959b56f |
| 57c6118be8 |
| 723aebbec9 |
| 2b395e34b1 |
| ada559f007 |
| 357e8ce73c |
| 6725902663 |
| 99bb21c512 |
| a4669f3fb5 |
| e8f40bdff9 |
| 68a407ea37 |
| 80879cabe1 |
| 71afc62298 |
| ca5c8549b9 |
| ab73def07a |
| 3f840233d8 |
| 90d9edb8e5 |
| b9b360bce4 |
| 27654961f9 |
| d45af760d8 |
| 95fa045297 |
| cb65dae63d |
| 21b82d7efc |
| 63c6d7443b |
| 753f4a2ec1 |
| ed667f7e54 |
| c4a034eb43 |
| 2eca0f0b5f |
| 58d627e05a |
| 639183ba0e |
| 9fa08442cf |
| 0dd50394ec |
| ac8d4e1341 |
| 2488fbde78 |
| 2ab8065142 |
| 25410b86ae |
| 4e44dd8510 |
| 1e56a22b32 |
| 7a95f0c7a4 |
| c81506220b |
| e5598c15a7 |
| 433af5e0fe |
.github/workflows/deploy.yaml (vendored, 23 changed lines)
```diff
@@ -1,7 +1,7 @@
 name: Build and upload assets
 on:
   release:
-    types: [ published ]
+    types: [published]
 
 jobs:
   build:
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-latest, windows-latest, macos-latest ]
+        os: [ubuntu-latest, windows-latest, macos-latest]
     name: Building, ${{ matrix.os }}
     steps:
       - name: Fix CRLF on Windows
@@ -17,18 +17,12 @@ jobs:
         run: git config --global core.autocrlf false
 
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      # Increase the pagefile size on Windows to aviod running out of memory
-      - name: Increase pagefile size on Windows
-        if: runner.os == 'Windows'
-        run: powershell -command .github\workflows\SetPageFileSize.ps1
-
+        uses: actions/checkout@v4
 
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
+          go-version: 1.21
 
       - name: Build on Linux
         if: runner.os == 'Linux'
@@ -36,7 +30,7 @@ jobs:
        # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
        # `-s -w` strips the binary to produce smaller size binaries
        run: |
-          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
+          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
          zip -r "${archive}" ./bin/*
@@ -47,7 +41,7 @@ jobs:
        if: runner.os == 'Windows'
        shell: bash
        run: |
-          go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
+          go build -v -ldflags="-s -w" -o bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
          powershell "Compress-Archive bin/* \"${archive}\""
@@ -57,14 +51,13 @@ jobs:
       - name: Build on MacOS
         if: runner.os == 'macOS'
         run: |
-          go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
+          go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV
 
-
       - name: Upload release asset
         uses: actions/upload-release-asset@v1
         env:
```
.github/workflows/race.yaml (vendored, 8 changed lines)
```diff
@@ -11,18 +11,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        branch: [ master, latest ]
+        branch: [master, latest]
     name: Race detection on ${{ matrix.branch }}
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
+          go-version: 1.23
 
       - name: Set scheduled branch name
         shell: bash
```
.github/workflows/tests.yaml (vendored, 28 changed lines)
```diff
@@ -8,22 +8,20 @@ on:
     types: [opened, synchronize, edited]
 
 jobs:
-
   build:
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-latest, macos-latest ]
+        os: [ubuntu-latest, macos-latest]
     name: Tests, ${{ matrix.os }}
     steps:
-
       - name: Fix CRLF on Windows
         if: runner.os == 'Windows'
         run: git config --global core.autocrlf false
 
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       # Increase the pagefile size on Windows to aviod running out of memory
       - name: Increase pagefile size on Windows
@@ -31,14 +29,13 @@ jobs:
         run: powershell -command .github\workflows\SetPageFileSize.ps1
 
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
-
+          go-version: 1.23
 
       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
       - name: Go Cache
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -49,19 +46,17 @@ jobs:
         shell: bash
         run: ./build_and_test.sh -v
 
-
   stability-test-fast:
     runs-on: ubuntu-latest
     name: Fast stability tests, ${{ github.head_ref }}
     steps:
-
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
+          go-version: 1.23
 
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
@@ -75,18 +70,17 @@ jobs:
         working-directory: stability-tests
         run: ./install_and_test.sh
 
-
   coverage:
     runs-on: ubuntu-latest
     name: Produce code coverage
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
+          go-version: 1.23
 
       - name: Delete the stability tests from coverage
         run: rm -r stability-tests
```
.gitignore (vendored, 1 changed line)
```diff
@@ -53,6 +53,7 @@ _testmain.go
 debug
 debug.test
 __debug_bin
+*__debug_*
 
 # CI
 version.txt
```
CODE_OF_CONDUCT.md (new file, 43 lines)
```diff
@@ -0,0 +1,43 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
```
README.md (16 changed lines)
````diff
@@ -1,13 +1,15 @@
-Kaspad
-====
+# DEPRECATED
+
+The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
+
+PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
+
+# Kaspad
 
 [](https://choosealicense.com/licenses/isc/)
 [](http://godoc.org/github.com/kaspanet/kaspad)
 
-Kaspad is the reference full node Kaspa implementation written in Go (golang).
-
-This project is currently under active development and is in Beta state.
+Kaspad was the reference full node Kaspa implementation written in Go (golang).
 
 ## What is kaspa
 
@@ -15,7 +17,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
 
 ## Requirements
 
-Go 1.16 or later.
+Go 1.23 or later.
 
 ## Installation
 
@@ -42,7 +44,6 @@ $ go install . ./cmd/...
 not already add the bin directory to your system path during Go installation,
 you are encouraged to do so now.
 
-
 ## Getting Started
 
 Kaspad has several configuration options available to tweak how it runs, but all
@@ -53,6 +54,7 @@ $ kaspad
 ```
 
 ## Discord
 
 Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
 
 ## Issue Tracker
````
```diff
@@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
 with the specifics of message handling such as what to do when a message is
 received. This provides the caller with a high level of flexibility.
 
-Kaspa Message Overview
+# Kaspa Message Overview
 
 The kaspa protocol consists of exchanging messages between peers. Each
 message is preceded by a header which identifies information about it such as
@@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
 appmessage using kaspa encoding are handled so the caller doesn't have to concern
 themselves with the specifics.
 
-Message Interaction
+# Message Interaction
 
 The following provides a quick summary of how the kaspa messages are intended
 to interact with one another. As stated above, these interactions are not
@@ -45,13 +45,13 @@ interactions in no particular order.
 	notfound message (MsgNotFound)
 	ping message (MsgPing) pong message (MsgPong)
 
-Common Parameters
+# Common Parameters
 
 There are several common parameters that arise when using this package to read
 and write kaspa messages. The following sections provide a quick overview of
 these parameters so the next sections can build on them.
 
-Protocol Version
+# Protocol Version
 
 The protocol version should be negotiated with the remote peer at a higher
 level than this package via the version (MsgVersion) message exchange, however,
@@ -60,18 +60,18 @@ latest protocol version this package supports and is typically the value to use
 for all outbound connections before a potentially lower protocol version is
 negotiated.
 
-Kaspa Network
+# Kaspa Network
 
 The kaspa network is a magic number which is used to identify the start of a
 message and which kaspa network the message applies to. This package provides
 the following constants:
 
-  appmessage.Mainnet
-  appmessage.Testnet (Test network)
-  appmessage.Simnet (Simulation test network)
-  appmessage.Devnet (Development network)
+	appmessage.Mainnet
+	appmessage.Testnet (Test network)
+	appmessage.Simnet (Simulation test network)
+	appmessage.Devnet (Development network)
 
-Determining Message Type
+# Determining Message Type
 
 As discussed in the kaspa message overview section, this package reads
 and writes kaspa messages using a generic interface named Message. In
@@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
 	fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
 	}
 
-Reading Messages
+# Reading Messages
 
 In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
 function. It accepts any io.Reader, but typically this will be a net.Conn to
@@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
 	// Log and handle the error
 	}
 
-Writing Messages
+# Writing Messages
 
 In order to marshall kaspa messages to the appmessage, use the WriteMessage
 function. It accepts any io.Writer, but typically this will be a net.Conn to
@@ -122,7 +122,7 @@ from a remote peer is:
 	// Log and handle the error
 	}
 
-Errors
+# Errors
 
 Errors returned by this package are either the raw errors provided by underlying
 calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
```
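The package documentation above walks through reading a message and then type-switching on its concrete type. Here is a minimal sketch of that flow, assuming `ReadMessage(r io.Reader) (Message, error)` as the "Reading Messages" section describes; the `MsgPing`/`MsgBlock` cases and the `Header.TxnCount` field are taken from names the documentation itself mentions and are not verified against the current package:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Placeholder address; a real caller would dial a kaspad peer.
	conn, err := net.Dial("tcp", "localhost:16111")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// "Reading Messages": ReadMessage accepts any io.Reader, typically a net.Conn.
	msg, err := appmessage.ReadMessage(conn)
	if err != nil {
		log.Fatal(err) // Log and handle the error
	}

	// "Determining Message Type": type switch over the generic Message interface.
	switch msg := msg.(type) {
	case *appmessage.MsgPing:
		fmt.Println("received ping")
	case *appmessage.MsgBlock:
		fmt.Printf("Number of tx in block: %d\n", msg.Header.TxnCount)
	default:
		fmt.Println("received message with command", msg.Command())
	}
}
```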
```diff
@@ -2,9 +2,10 @@ package appmessage
 
 import (
 	"encoding/hex"
-	"github.com/pkg/errors"
 	"math/big"
 
+	"github.com/pkg/errors"
+
 	"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
@@ -213,13 +214,14 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
 	}
 
 	return &externalapi.DomainTransaction{
-		Version:      rpcTransaction.Version,
-		Inputs:       inputs,
-		Outputs:      outputs,
-		LockTime:     rpcTransaction.LockTime,
-		SubnetworkID: *subnetworkID,
-		Gas:          rpcTransaction.LockTime,
-		Payload:      payload,
+		Version:        rpcTransaction.Version,
+		Inputs:         inputs,
+		Outputs:        outputs,
+		LockTime:       rpcTransaction.LockTime,
+		SubnetworkID:   *subnetworkID,
+		Gas:            rpcTransaction.Gas,
+		MassCommitment: rpcTransaction.Mass,
+		Payload:        payload,
 	}, nil
 }
@@ -286,7 +288,8 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
 		Outputs:      outputs,
 		LockTime:     transaction.LockTime,
 		SubnetworkID: subnetworkID,
-		Gas:          transaction.LockTime,
+		Gas:          transaction.Gas,
+		Mass:         transaction.MassCommitment,
 		Payload:      payload,
 	}
 }
```
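The two conversion hunks above fix a copy-paste bug: `Gas` was being populated from `LockTime` in both directions, and the new `Mass`/`MassCommitment` field is now carried through. A hedged regression sketch for that mapping; `subnetworks.SubnetworkIDNative` and its `String` method are assumptions about package layout, and other `RPCTransaction` fields may need filling in for the conversion to succeed:

```go
package appmessage_test

import (
	"testing"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
)

// TestGasAndMassRoundTrip checks that Gas and Mass survive the RPC <-> domain
// round trip; before the fix above, Gas silently took LockTime's value.
func TestGasAndMassRoundTrip(t *testing.T) {
	rpcTx := &appmessage.RPCTransaction{
		LockTime:     12345,
		Gas:          777,
		Mass:         999,
		SubnetworkID: subnetworks.SubnetworkIDNative.String(), // assumed helper
	}
	domainTx, err := appmessage.RPCTransactionToDomainTransaction(rpcTx)
	if err != nil {
		t.Fatalf("RPCTransactionToDomainTransaction: %v", err)
	}
	if domainTx.Gas != 777 {
		t.Fatalf("Gas not preserved: got %d (LockTime leaked through?)", domainTx.Gas)
	}
	if domainTx.MassCommitment != 999 {
		t.Fatalf("MassCommitment not preserved: got %d", domainTx.MassCommitment)
	}

	roundTripped := appmessage.DomainTransactionToRPCTransaction(domainTx)
	if roundTripped.Gas != 777 || roundTripped.Mass != 999 {
		t.Fatalf("round trip lost Gas/Mass: got %d/%d", roundTripped.Gas, roundTripped.Mass)
	}
}
```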
```diff
@@ -69,6 +69,10 @@ const (
 	CmdReady
 	CmdTrustedData
 	CmdBlockWithTrustedDataV4
+	CmdRequestNextPruningPointAndItsAnticoneBlocks
+	CmdRequestIBDChainBlockLocator
+	CmdIBDChainBlockLocator
+	CmdRequestAnticone
 
 	// rpc
 	CmdGetCurrentNetworkRequestMessage
@@ -152,6 +156,17 @@ const (
 	CmdVirtualDaaScoreChangedNotificationMessage
 	CmdGetBalancesByAddressesRequestMessage
 	CmdGetBalancesByAddressesResponseMessage
+	CmdNotifyNewBlockTemplateRequestMessage
+	CmdNotifyNewBlockTemplateResponseMessage
+	CmdNewBlockTemplateNotificationMessage
+	CmdGetMempoolEntriesByAddressesRequestMessage
+	CmdGetMempoolEntriesByAddressesResponseMessage
+	CmdGetCoinSupplyRequestMessage
+	CmdGetCoinSupplyResponseMessage
+	CmdGetFeeEstimateRequestMessage
+	CmdGetFeeEstimateResponseMessage
+	CmdSubmitTransactionReplacementRequestMessage
+	CmdSubmitTransactionReplacementResponseMessage
 )
 
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -195,6 +210,10 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdReady:                  "Ready",
 	CmdTrustedData:            "TrustedData",
 	CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
+	CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
+	CmdRequestIBDChainBlockLocator:                 "RequestIBDChainBlockLocator",
+	CmdIBDChainBlockLocator:                        "IBDChainBlockLocator",
+	CmdRequestAnticone:                             "RequestAnticone",
 }
 
 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -278,6 +297,17 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
 	CmdGetBalancesByAddressesRequestMessage:      "GetBalancesByAddressesRequest",
 	CmdGetBalancesByAddressesResponseMessage:     "GetBalancesByAddressesResponse",
+	CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
+	CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
+	CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
+	CmdGetMempoolEntriesByAddressesRequestMessage:  "GetMempoolEntriesByAddressesRequest",
+	CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
+	CmdGetCoinSupplyRequestMessage:                 "GetCoinSupplyRequest",
+	CmdGetCoinSupplyResponseMessage:                "GetCoinSupplyResponse",
+	CmdGetFeeEstimateRequestMessage:                "GetFeeEstimateRequest",
+	CmdGetFeeEstimateResponseMessage:               "GetFeeEstimateResponse",
+	CmdSubmitTransactionReplacementRequestMessage:  "SubmitTransactionReplacementRequest",
+	CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
 }
 
 // Message is an interface that describes a kaspa message. A type that
```
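Each command added above needs two touches: the enum constant and the matching entry in the string map. A hedged, standalone sanity check (illustrative, not code from the repo) that the new RPC constants all have string mappings:

```go
package main

import (
	"log"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// These constants and RPCMessageCommandToString are the ones added in the
	// hunks above; the check itself is an illustration only.
	newCommands := []appmessage.MessageCommand{
		appmessage.CmdGetFeeEstimateRequestMessage,
		appmessage.CmdGetFeeEstimateResponseMessage,
		appmessage.CmdGetCoinSupplyRequestMessage,
		appmessage.CmdGetCoinSupplyResponseMessage,
		appmessage.CmdSubmitTransactionReplacementRequestMessage,
		appmessage.CmdSubmitTransactionReplacementResponseMessage,
	}
	for _, cmd := range newCommands {
		name, ok := appmessage.RPCMessageCommandToString[cmd]
		if !ok {
			log.Fatalf("command %d has no string mapping", cmd)
		}
		log.Printf("command %d -> %s", cmd, name)
	}
}
```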
```diff
@@ -132,7 +132,7 @@ func TestConvertToPartial(t *testing.T) {
 	}
 }
 
-//blockOne is the first block in the mainnet block DAG.
+// blockOne is the first block in the mainnet block DAG.
 var blockOne = MsgBlock{
 	Header: MsgBlockHeader{
 		Version: 0,
```
app/appmessage/p2p_msgibdchainblocklocator.go (new file, 27 lines)
```diff
@@ -0,0 +1,27 @@
+package appmessage
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+)
+
+// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
+// locator message. It is used to find the blockLocator of a peer that is
+// syncing with you.
+type MsgIBDChainBlockLocator struct {
+	baseMessage
+	BlockLocatorHashes []*externalapi.DomainHash
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
+	return CmdIBDChainBlockLocator
+}
+
+// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
+// the Message interface. See MsgBlockLocator for details.
+func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
+	return &MsgIBDChainBlockLocator{
+		BlockLocatorHashes: locatorHashes,
+	}
+}
```
app/appmessage/p2p_msgrequestanticone.go (new file, 33 lines)
```diff
@@ -0,0 +1,33 @@
+// Copyright (c) 2013-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package appmessage
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+)
+
+// MsgRequestAnticone implements the Message interface and represents a kaspa
+// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
+type MsgRequestAnticone struct {
+	baseMessage
+	BlockHash   *externalapi.DomainHash
+	ContextHash *externalapi.DomainHash
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgRequestAnticone) Command() MessageCommand {
+	return CmdRequestAnticone
+}
+
+// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
+// Message interface using the passed parameters and defaults for the remaining
+// fields.
+func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
+	return &MsgRequestAnticone{
+		BlockHash:   blockHash,
+		ContextHash: contextHash,
+	}
+}
```
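A sketch of constructing the new message; `NewDomainHashFromString` appears elsewhere in this diff, and the hash strings below are placeholders borrowed from the test vectors further down:

```go
package main

import (
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Any 64-hex-character hash parses the same way; these are placeholders.
	blockHash, err := externalapi.NewDomainHashFromString(
		"b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7")
	if err != nil {
		log.Fatal(err)
	}
	contextHash, err := externalapi.NewDomainHashFromString(
		"fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7")
	if err != nil {
		log.Fatal(err)
	}

	msg := appmessage.NewMsgRequestAnticone(blockHash, contextHash)
	fmt.Println(msg.Command() == appmessage.CmdRequestAnticone) // true
}
```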
app/appmessage/p2p_msgrequestibdchainblocklocator.go (new file, 31 lines)
```diff
@@ -0,0 +1,31 @@
+package appmessage
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+)
+
+// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
+// IBDRequestChainBlockLocator message. It is used to request a block locator between low
+// and high hash.
+// The locator is returned via a locator message (MsgIBDChainBlockLocator).
+type MsgRequestIBDChainBlockLocator struct {
+	baseMessage
+	HighHash *externalapi.DomainHash
+	LowHash  *externalapi.DomainHash
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
+	return CmdRequestIBDChainBlockLocator
+}
+
+// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
+// Message interface using the passed parameters and defaults for the remaining
+// fields.
+func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
+	return &MsgRequestIBDChainBlockLocator{
+		HighHash: highHash,
+		LowHash:  lowHash,
+	}
+}
```
```diff
@@ -0,0 +1,22 @@
+package appmessage
+
+// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
+// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
+// more blocks from the pruning anticone.
+//
+// This message has no payload.
+type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
+	return CmdRequestNextPruningPointAndItsAnticoneBlocks
+}
+
+// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
+// Message interface.
+func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
+	return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
+}
```
```diff
@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
 
 // TestTxHash tests the ability to generate the hash of a transaction accurately.
 func TestTxHashAndID(t *testing.T) {
-	txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
-	txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
+	txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
+	txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
 	wantTxID1, err := transactionid.FromString(txID1Str)
 	if err != nil {
 		t.Fatalf("NewTxIDFromStr: %v", err)
@@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
 			spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
 	}
 
-	hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
+	hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
 	wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
 	if err != nil {
 		t.Errorf("NewTxIDFromStr: %v", err)
```
app/appmessage/rpc_fee_estimate.go (new file, 47 lines)
```diff
@@ -0,0 +1,47 @@
+package appmessage
+
+// GetFeeEstimateRequestMessage is an appmessage corresponding to
+// its respective RPC message
+type GetFeeEstimateRequestMessage struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
+	return CmdGetFeeEstimateRequestMessage
+}
+
+// NewGetFeeEstimateRequestMessage returns a instance of the message
+func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
+	return &GetFeeEstimateRequestMessage{}
+}
+
+type RPCFeeRateBucket struct {
+	Feerate          float64
+	EstimatedSeconds float64
+}
+
+type RPCFeeEstimate struct {
+	PriorityBucket RPCFeeRateBucket
+	NormalBuckets  []RPCFeeRateBucket
+	LowBuckets     []RPCFeeRateBucket
+}
+
+// GetCoinSupplyResponseMessage is an appmessage corresponding to
+// its respective RPC message
+type GetFeeEstimateResponseMessage struct {
+	baseMessage
+	Estimate RPCFeeEstimate
+
+	Error *RPCError
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
+	return CmdGetFeeEstimateResponseMessage
+}
+
+// NewGetFeeEstimateResponseMessage returns a instance of the message
+func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
+	return &GetFeeEstimateResponseMessage{}
+}
```
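One way a client might consume the estimate: scan buckets from cheapest to most expensive and take the first feerate expected to confirm within a deadline. This is a sketch under assumptions the diff does not state (bucket ordering and the exact semantics of `EstimatedSeconds`):

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// pickFeerate returns the cheapest feerate whose bucket is expected to confirm
// within maxWaitSeconds, assuming LowBuckets are the cheapest and
// PriorityBucket is the most expensive.
func pickFeerate(estimate appmessage.RPCFeeEstimate, maxWaitSeconds float64) float64 {
	buckets := append([]appmessage.RPCFeeRateBucket{}, estimate.LowBuckets...)
	buckets = append(buckets, estimate.NormalBuckets...)
	buckets = append(buckets, estimate.PriorityBucket)
	for _, bucket := range buckets {
		if bucket.EstimatedSeconds <= maxWaitSeconds {
			return bucket.Feerate
		}
	}
	// Nothing meets the deadline; fall back to the priority bucket.
	return estimate.PriorityBucket.Feerate
}

func main() {
	estimate := appmessage.RPCFeeEstimate{
		PriorityBucket: appmessage.RPCFeeRateBucket{Feerate: 5, EstimatedSeconds: 1},
		NormalBuckets:  []appmessage.RPCFeeRateBucket{{Feerate: 2, EstimatedSeconds: 10}},
		LowBuckets:     []appmessage.RPCFeeRateBucket{{Feerate: 1, EstimatedSeconds: 60}},
	}
	fmt.Println(pickFeerate(estimate, 30)) // prints 2
}
```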
```diff
@@ -5,6 +5,7 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
 	baseMessage
 	PayAddress string
+	ExtraData  string
 }
 
 // Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }
 
 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
 	return &GetBlockTemplateRequestMessage{
 		PayAddress: payAddress,
+		ExtraData:  extraData,
 	}
 }
```
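For callers this means the template request now carries the miner's extra data alongside the pay address; a minimal sketch with placeholder values:

```go
package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Placeholder pay address and miner tag; neither is validated here.
	req := appmessage.NewGetBlockTemplateRequestMessage(
		"kaspa:placeholder-pay-address",
		"example-miner/1.0",
	)
	_ = req // sent via the RPC client in real code
}
```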
app/appmessage/rpc_get_coin_supply.go (new file, 40 lines)
```diff
@@ -0,0 +1,40 @@
+package appmessage
+
+// GetCoinSupplyRequestMessage is an appmessage corresponding to
+// its respective RPC message
+type GetCoinSupplyRequestMessage struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
+	return CmdGetCoinSupplyRequestMessage
+}
+
+// NewGetCoinSupplyRequestMessage returns a instance of the message
+func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
+	return &GetCoinSupplyRequestMessage{}
+}
+
+// GetCoinSupplyResponseMessage is an appmessage corresponding to
+// its respective RPC message
+type GetCoinSupplyResponseMessage struct {
+	baseMessage
+	MaxSompi         uint64
+	CirculatingSompi uint64
+
+	Error *RPCError
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
+	return CmdGetCoinSupplyResponseMessage
+}
+
+// NewGetCoinSupplyResponseMessage returns a instance of the message
+func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
+	return &GetCoinSupplyResponseMessage{
+		MaxSompi:         maxSompi,
+		CirculatingSompi: circulatingSompi,
+	}
+}
```
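A small sketch of reading the response; the sompi amounts are placeholders (sompi being kaspa's base unit), and in real use the message arrives over RPC rather than being constructed locally:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Placeholder supply figures in sompi.
	resp := appmessage.NewGetCoinSupplyResponseMessage(2900000000000000000, 2200000000000000000)
	fmt.Printf("circulating supply is %.1f%% of max\n",
		100*float64(resp.CirculatingSompi)/float64(resp.MaxSompi))
}
```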
```diff
@@ -23,6 +23,8 @@ type GetInfoResponseMessage struct {
 	P2PID         string
 	MempoolSize   uint64
 	ServerVersion string
+	IsUtxoIndexed bool
+	IsSynced      bool
 
 	Error *RPCError
 }
@@ -33,10 +35,12 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
 }
 
 // NewGetInfoResponseMessage returns a instance of the message
-func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
+func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
 	return &GetInfoResponseMessage{
 		P2PID:         p2pID,
 		MempoolSize:   mempoolSize,
 		ServerVersion: serverVersion,
+		IsUtxoIndexed: isUtxoIndexed,
+		IsSynced:      isSynced,
 	}
 }
```
```diff
@@ -4,6 +4,8 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntriesRequestMessage struct {
 	baseMessage
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }
 
 // Command returns the protocol command string for the message
@@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntriesRequestMessage returns a instance of the message
-func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
-	return &GetMempoolEntriesRequestMessage{}
+func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
+	return &GetMempoolEntriesRequestMessage{
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }
 
 // GetMempoolEntriesResponseMessage is an appmessage corresponding to
```
app/appmessage/rpc_get_mempool_entries_by_addresses.go (new file, 52 lines)
```diff
@@ -0,0 +1,52 @@
+package appmessage
+
+// MempoolEntryByAddress represents MempoolEntries associated with some address
+type MempoolEntryByAddress struct {
+	Address   string
+	Receiving []*MempoolEntry
+	Sending   []*MempoolEntry
+}
+
+// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
+// its respective RPC message
+type GetMempoolEntriesByAddressesRequestMessage struct {
+	baseMessage
+	Addresses             []string
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
+	return CmdGetMempoolEntriesByAddressesRequestMessage
+}
+
+// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
+func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
+	return &GetMempoolEntriesByAddressesRequestMessage{
+		Addresses:             addresses,
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
+}
+
+// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
+// its respective RPC message
+type GetMempoolEntriesByAddressesResponseMessage struct {
+	baseMessage
+	Entries []*MempoolEntryByAddress
+
+	Error *RPCError
+}
+
+// Command returns the protocol command string for the message
+func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
+	return CmdGetMempoolEntriesByAddressesResponseMessage
+}
+
+// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
+func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
+	return &GetMempoolEntriesByAddressesResponseMessage{
+		Entries: entries,
+	}
+}
```
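A sketch of tallying the per-address entries in a response, using only the fields the new file defines; in real use the response comes back over RPC rather than being built locally:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// summarize prints sending/receiving mempool activity per address.
func summarize(resp *appmessage.GetMempoolEntriesByAddressesResponseMessage) {
	for _, byAddress := range resp.Entries {
		var sendingFees, receivingFees uint64
		for _, entry := range byAddress.Sending {
			sendingFees += entry.Fee
		}
		for _, entry := range byAddress.Receiving {
			receivingFees += entry.Fee
		}
		fmt.Printf("%s: %d sending (fees %d sompi), %d receiving (fees %d sompi)\n",
			byAddress.Address, len(byAddress.Sending), sendingFees,
			len(byAddress.Receiving), receivingFees)
	}
}

func main() {
	// Empty response, just to exercise the helper.
	summarize(appmessage.NewGetMempoolEntriesByAddressesResponseMessage(nil))
}
```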
```diff
@@ -4,7 +4,9 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntryRequestMessage struct {
 	baseMessage
-	TxID string
+	TxID                  string
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }
 
 // Command returns the protocol command string for the message
@@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntryRequestMessage returns a instance of the message
-func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
-	return &GetMempoolEntryRequestMessage{TxID: txID}
+func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
+	return &GetMempoolEntryRequestMessage{
+		TxID:                  txID,
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }
 
 // GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
 type MempoolEntry struct {
 	Fee         uint64
 	Transaction *RPCTransaction
+	IsOrphan    bool
 }
 
 // Command returns the protocol command string for the message
@@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
 }
 
 // NewGetMempoolEntryResponseMessage returns a instance of the message
-func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
+func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
 	return &GetMempoolEntryResponseMessage{
 		Entry: &MempoolEntry{
 			Fee:         fee,
 			Transaction: transaction,
+			IsOrphan:    isOrphan,
 		},
 	}
 }
```
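Requesting a single entry now also states where to look; a minimal sketch, with a placeholder txID borrowed from the test vectors earlier in this diff:

```go
package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// The txID is only a placeholder here.
	req := appmessage.NewGetMempoolEntryRequestMessage(
		"e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d",
		true,  // includeOrphanPool: also search the orphan pool
		false, // filterTransactionPool: do not exclude the transaction pool
	)
	_ = req // sent via the RPC client in real code
}
```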
```diff
@@ -4,7 +4,8 @@ package appmessage
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
 	baseMessage
-	StartHash string
+	StartHash                     string
+	IncludeAcceptedTransactionIDs bool
 }
 
 // Command returns the protocol command string for the message
@@ -13,18 +14,29 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
 }
 
 // NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
-func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
+	startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+
 	return &GetVirtualSelectedParentChainFromBlockRequestMessage{
-		StartHash: startHash,
+		StartHash:                     startHash,
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
 	}
 }
 
+// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
+// VirtualSelectedParentChainChangedNotificationMessage appmessages
+type AcceptedTransactionIDs struct {
+	AcceptingBlockHash     string
+	AcceptedTransactionIDs []string
+}
+
 // GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 
 	Error *RPCError
 }
@@ -36,10 +48,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
 
 // NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
 func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
-	addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
+	addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
 
 	return &GetVirtualSelectedParentChainFromBlockResponseMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlockHashes,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
```
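When the request sets `IncludeAcceptedTransactionIDs`, the response's `AcceptedTransactionIDs` slice maps each accepting block to the transactions it accepted. A hedged sketch of flattening it; the start hash is a placeholder and the actual RPC round trip is outside the scope of this diff:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// printAccepted flattens the accepted-transaction information in a response.
func printAccepted(resp *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage) {
	for _, accepted := range resp.AcceptedTransactionIDs {
		for _, txID := range accepted.AcceptedTransactionIDs {
			fmt.Printf("tx %s accepted by block %s\n", txID, accepted.AcceptingBlockHash)
		}
	}
}

func main() {
	req := appmessage.NewGetVirtualSelectedParentChainFromBlockRequestMessage(
		"b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7", true)
	_ = req // sent via the RPC client in real code

	// Empty response, just to exercise the helper.
	printAccepted(appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(nil, nil, nil))
}
```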
app/appmessage/rpc_notify_new_block_template.go (new file, 50 lines)
```diff
@@ -0,0 +1,50 @@
+package appmessage
+
+// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
+// its respective RPC message
+type NotifyNewBlockTemplateRequestMessage struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message
+func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
+	return CmdNotifyNewBlockTemplateRequestMessage
+}
+
+// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
+func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
+	return &NotifyNewBlockTemplateRequestMessage{}
+}
+
+// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
+// its respective RPC message
+type NotifyNewBlockTemplateResponseMessage struct {
+	baseMessage
+	Error *RPCError
+}
+
+// Command returns the protocol command string for the message
+func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
+	return CmdNotifyNewBlockTemplateResponseMessage
+}
+
+// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
+func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
+	return &NotifyNewBlockTemplateResponseMessage{}
+}
+
+// NewBlockTemplateNotificationMessage is an appmessage corresponding to
+// its respective RPC message
+type NewBlockTemplateNotificationMessage struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message
+func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
+	return CmdNewBlockTemplateNotificationMessage
+}
+
+// NewNewBlockTemplateNotificationMessage returns an instance of the message
+func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
+	return &NewBlockTemplateNotificationMessage{}
+}
```
```diff
@@ -4,6 +4,7 @@ package appmessage
 // its respective RPC message
 type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
 	baseMessage
+	IncludeAcceptedTransactionIDs bool
 }
 
 // Command returns the protocol command string for the message
@@ -11,9 +12,13 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
 	return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
 }
 
-// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
-func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
-	return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
+// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
+func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
+	includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
+
+	return &NotifyVirtualSelectedParentChainChangedRequestMessage{
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
+	}
 }
 
 // NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@@ -39,6 +44,7 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 }
 
 // Command returns the protocol command string for the message
@@ -48,10 +54,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
 
 // NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
 func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
-	addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
+	addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
 
 	return &VirtualSelectedParentChainChangedNotificationMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlocks,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
```
```diff
@@ -52,6 +52,7 @@ type RPCTransaction struct {
 	SubnetworkID string
 	Gas          uint64
 	Payload      string
+	Mass         uint64
 	VerboseData  *RPCTransactionVerboseData
 }
 
```
app/appmessage/rpc_submit_transaction_replacement.go (new file, 42 lines)
```diff
@@ -0,0 +1,42 @@
+package appmessage
+
+// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
+// its respective RPC message
+type SubmitTransactionReplacementRequestMessage struct {
+	baseMessage
+	Transaction *RPCTransaction
+}
+
+// Command returns the protocol command string for the message
+func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
+	return CmdSubmitTransactionReplacementRequestMessage
+}
+
+// NewSubmitTransactionReplacementRequestMessage returns a instance of the message
+func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
+	return &SubmitTransactionReplacementRequestMessage{
+		Transaction: transaction,
+	}
+}
+
+// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
+// its respective RPC message
+type SubmitTransactionReplacementResponseMessage struct {
+	baseMessage
+	TransactionID       string
+	ReplacedTransaction *RPCTransaction
+
+	Error *RPCError
+}
+
+// Command returns the protocol command string for the message
+func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
+	return CmdSubmitTransactionReplacementResponseMessage
+}
+
+// NewSubmitTransactionReplacementResponseMessage returns a instance of the message
+func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
+	return &SubmitTransactionReplacementResponseMessage{
+		TransactionID: transactionID,
+	}
+}
```
```diff
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"sync/atomic"
 
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
 	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
 
 	"github.com/kaspanet/kaspad/app/protocol"
@@ -67,6 +69,7 @@ func (a *ComponentManager) Stop() {
 	}
 
 	a.protocolManager.Close()
+	close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
 
 	return
 }
@@ -118,7 +121,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
 	if err != nil {
 		return nil, err
 	}
-	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
+	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
 
 	return &ComponentManager{
 		cfg: cfg,
@@ -139,6 +142,7 @@ func setupRPC(
 	connectionManager *connmanager.ConnectionManager,
 	addressManager *addressmanager.AddressManager,
 	utxoIndex *utxoindex.UTXOIndex,
+	consensusEventsChan chan externalapi.ConsensusEvent,
 	shutDownChan chan<- struct{},
 ) *rpc.Manager {
 
@@ -150,10 +154,10 @@ func setupRPC(
 		connectionManager,
 		addressManager,
 		utxoIndex,
+		consensusEventsChan,
 		shutDownChan,
 	)
-	protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
-	protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
+	protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
 	protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
 
 	return rpcManager
```
```diff
@@ -16,53 +16,42 @@ import (
 // OnNewBlock updates the mempool after a new block arrival, and
 // relays newly unorphaned transactions and possibly rebroadcast
 // manually added transactions when not in IBD.
-func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
-	virtualChangeSet *externalapi.VirtualChangeSet) error {
+func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
 
 	hash := consensushashing.BlockHash(block)
-	log.Debugf("OnNewBlock start for block %s", hash)
-	defer log.Debugf("OnNewBlock end for block %s", hash)
+	log.Tracef("OnNewBlock start for block %s", hash)
+	defer log.Tracef("OnNewBlock end for block %s", hash)
 
-	unorphaningResults, err := f.UnorphanBlocks(block)
+	unorphanedBlocks, err := f.UnorphanBlocks(block)
 	if err != nil {
 		return err
 	}
 
-	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
+	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
 
 	newBlocks := []*externalapi.DomainBlock{block}
-	newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
-	for _, unorphaningResult := range unorphaningResults {
-		newBlocks = append(newBlocks, unorphaningResult.block)
-		newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
-	}
+	newBlocks = append(newBlocks, unorphanedBlocks...)
 
 	allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
-	for i, newBlock := range newBlocks {
+	for _, newBlock := range newBlocks {
 		log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
 		acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
 		if err != nil {
 			return err
 		}
 		allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
-
-		if f.onBlockAddedToDAGHandler != nil {
-			log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
-			virtualChangeSet = newVirtualChangeSets[i]
-			err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
-			if err != nil {
-				return err
-			}
-		}
 	}
 
 	return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
 }
 
-// OnVirtualChange calls the handler function whenever the virtual block changes.
-func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
-	if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
-		return f.onVirtualChangeHandler(virtualChangeSet)
+// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
+func (f *FlowContext) OnNewBlockTemplate() error {
+	// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
+	// state consistent without dependency on external event registration
+	f.Domain().MiningManager().ClearBlockTemplate()
+	if f.onNewBlockTemplateHandler != nil {
+		return f.onNewBlockTemplateHandler()
 	}
 
 	return nil
@@ -118,14 +107,18 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
 		return protocolerrors.Errorf(false, "cannot add header only block")
 	}
 
-	virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
+	err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
 		}
 		return err
 	}
-	err = f.OnNewBlock(block, virtualChangeSet)
+	err = f.OnNewBlockTemplate()
+	if err != nil {
+		return err
+	}
+	err = f.OnNewBlock(block)
 	if err != nil {
 		return err
 	}
@@ -150,7 +143,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
 		return false
 	}
 	f.ibdPeer = ibdPeer
-	log.Infof("IBD started")
+	log.Infof("IBD started with peer %s", ibdPeer)
 
 	return true
 }
```
@ -2,6 +2,7 @@ package flowcontext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
@ -9,6 +10,11 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPingTimeout signifies that a ping operation timed out.
|
||||
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
|
||||
)
|
||||
|
||||
// HandleError handles an error from a flow,
|
||||
// It sends the error to errChan if isStopping == 0 and increments isStopping
|
||||
//
|
||||
@ -21,8 +27,15 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
log.Errorf("error from %s: %s", flowName, err)
|
||||
if errors.Is(err, ErrPingTimeout) {
|
||||
// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
|
||||
log.Errorf("error from %s: %s", flowName, err)
|
||||
} else {
|
||||
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
|
||||
logFrame := strings.Repeat("=", 52)
|
||||
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
|
||||
flowName, logFrame, err, logFrame)
|
||||
}
|
||||
}
|
||||
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
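// Editor's sketch (assumption, not part of the diff): the isStopping counter makes
// the error report fire exactly once even if several goroutines of the same flow
// fail concurrently, because atomic.AddUint32 returns the incremented value and
// only the first caller observes 1:
//
//	var isStopping uint32
//	errChan := make(chan error, 1)
//	reportOnce := func(err error) {
//		if atomic.AddUint32(&isStopping, 1) == 1 {
//			errChan <- err
//		}
//	}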

@@ -18,12 +18,8 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
)

// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error

// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
type OnNewBlockTemplateHandler func() error

// OnPruningPointUTXOSetOverrideHandler is a handler function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.

@@ -44,8 +40,7 @@ type FlowContext struct {

timeStarted int64

onVirtualChangeHandler OnVirtualChangeHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onNewBlockTemplateHandler OnNewBlockTemplateHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler

@@ -102,14 +97,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan
}

// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
f.onVirtualChangeHandler = onVirtualChangeHandler
// IsNearlySynced returns whether the current consensus is considered synced or close to being synced.
func (f *FlowContext) IsNearlySynced() (bool, error) {
return f.Domain().Consensus().IsNearlySynced()
}

// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
}

// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler

@@ -72,3 +72,10 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
}
return peers
}

// HasPeers returns whether there are currently active peers
func (f *FlowContext) HasPeers() bool {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
return len(f.peers) > 0
}
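// Editor's sketch (assumption — the registration site is outside this diff, and
// rebuildTemplateAndNotifyMiners is a hypothetical callback): an RPC or mining
// component would wire the new event like so:
//
//	flowContext.SetOnNewBlockTemplateHandler(func() error {
//		// A new template is available; rebuild the cache and notify miners.
//		return rebuildTemplateAndNotifyMiners()
//	})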

@@ -15,12 +15,6 @@ import (
// on: 2^orphanResolutionRange * PHANTOM K.
const maxOrphans = 600

// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
virtualChangeSet *externalapi.VirtualChangeSet
}

// AddOrphan adds the block to the orphan set
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
f.orphansMutex.Lock()

@@ -57,7 +51,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
}

// UnorphanBlocks removes the block from the orphan set, and removes all of the blocks that are no longer orphans.
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()

@@ -66,7 +60,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
rootBlockHash := consensushashing.BlockHash(rootBlock)
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})

var unorphaningResults []*UnorphaningResult
var unorphanedBlocks []*externalapi.DomainBlock
for len(processQueue) > 0 {
var orphanHash externalapi.DomainHash
orphanHash, processQueue = processQueue[0], processQueue[1:]

@@ -90,21 +84,18 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
}
}
if canBeUnorphaned {
virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
virtualChangeSet: virtualChangeSet,
})
unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
}
}

return unorphaningResults, nil
return unorphanedBlocks, nil
}

// addChildOrphansToProcessQueue finds all child orphans of `blockHash`

@@ -143,24 +134,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
return childOrphans
}

func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)

virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
return nil, false, nil
return false, nil
}
return nil, false, err
return false, err
}

log.Infof("Unorphaned block %s", orphanHash)
return virtualChangeSet, true, nil
return true, nil
}

// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
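// Editor's sketch (assumption — the eligibility loop is truncated in this diff,
// and DirectParents is used here illustratively): an orphan can be unorphaned
// once every direct parent exists in consensus with a block body:
//
//	canBeUnorphaned := true
//	for _, parent := range orphanBlock.Header.DirectParents() {
//		info, err := f.domain.Consensus().GetBlockInfo(parent)
//		if err != nil || !info.Exists || info.BlockStatus == externalapi.StatusHeaderOnly {
//			canBeUnorphaned = false
//			break
//		}
//	}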

@@ -1,47 +0,0 @@
package flowcontext

import "github.com/kaspanet/kaspad/util/mstime"

const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)

// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}

if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}

virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}

if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
return false, nil
}

virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}

now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}

log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}

@@ -18,9 +18,9 @@ var (

// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
minAcceptableProtocolVersion = uint32(4)
minAcceptableProtocolVersion = uint32(5)

maxAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(5)
)

type receiveVersionFlow struct {

16
app/protocol/flows/v5/blockrelay/batch_size_test.go
Normal file
@@ -0,0 +1,16 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
)

func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
// However, we must nonetheless enforce that it does not exceed `router.DefaultMaxMessages`.
if ibdBatchSize >= router.DefaultMaxMessages {
t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
ibdBatchSize, router.DefaultMaxMessages)
}
}
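// Editor's note (reasoning sketch, not from the diff): the syncer enqueues up to
// ibdBatchSize messages before the syncee acknowledges the batch, so a whole
// batch must fit inside the route's bounded buffer (router.DefaultMaxMessages);
// otherwise Enqueue could fail mid-batch on a slow syncee.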

@@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex

switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:

@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

@@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.Exists {
if !blockInfo.HasHeader() {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}

@@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
}

// The IBD block locator checks only existing blocks with bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.HasBody() {
continue
}

@@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)

@@ -28,18 +27,15 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
block, found, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}

if !found {
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
}

// TODO (Partial nodes): Convert block to partial block if needed

blockMessage := appmessage.DomainBlockToMsgBlock(block)

@@ -0,0 +1,85 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)

// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestIBDChainBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
Domain() domain.Domain
}

type handleRequestIBDChainBlockLocatorFlow struct {
RequestIBDChainBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}

// HandleRequestIBDChainBlockLocator handles requestIBDChainBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {

flow := &handleRequestIBDChainBlockLocatorFlow{
RequestIBDChainBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}

func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
for {
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)

var locator externalapi.BlockLocator
if highHash == nil || lowHash == nil {
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
} else {
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
// The chain has been modified, signal it by sending an empty locator
locator, err = externalapi.BlockLocator{}, nil
}
}

if err != nil {
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between %s and %s", lowHash, highHash)
}

err = flow.sendIBDChainBlockLocator(locator)
if err != nil {
return err
}
}
}

func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {

message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)

return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}

func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
if err != nil {
return err
}
return nil
}
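// Editor's note (sketch of the round-trip, inferred from the handlers above): the
// syncee sends MsgRequestIBDChainBlockLocator{HighHash, LowHash} and reads back a
// MsgIBDChainBlockLocator. Nil bounds request the full headers-selected-chain
// locator, and an empty locator signals that the chain moved and the search on
// the syncee side must restart.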

@@ -118,16 +118,33 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
return err
}

for _, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
for i, blockHash := range pointAndItsAnticone {
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}

if !found {
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
}

err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}

if (i+1)%ibdBatchSize == 0 {
// No timeout here, as we don't care if the syncee takes its time computing,
// since it only blocks this dedicated flow
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
}
}
}

err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
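// Editor's sketch of the batching pattern above (simplified names, assumption):
// stream items in chunks of ibdBatchSize and block on an explicit request-next
// message between chunks, so the syncee paces the transfer.
//
//	for i, item := range items {
//		if err := outgoingRoute.Enqueue(item); err != nil {
//			return err
//		}
//		if (i+1)%ibdBatchSize == 0 {
//			if _, err := incomingRoute.Dequeue(); err != nil { // wait for the ack
//				return err
//			}
//		}
//	}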

@@ -5,7 +5,6 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)

@@ -29,18 +28,15 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
block, found, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}

if !found {
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
}

// TODO (Partial nodes): Convert block to partial block if needed

err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))

@@ -7,6 +7,7 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

@@ -25,8 +26,8 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnNewBlock(block *externalapi.DomainBlock) error
OnNewBlockTemplate() error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error

@@ -35,13 +36,19 @@ type RelayInvsContext interface {
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
IsRecoverableError(err error) bool
IsNearlySynced() (bool, error)
}

type invRelayBlock struct {
Hash *externalapi.DomainHash
IsOrphanRoot bool
}

type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []*appmessage.MsgInvRelayBlock
invsQueue []invRelayBlock
}

// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they

@@ -54,7 +61,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
invsQueue: make([]invRelayBlock, 0),
}
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now

@@ -105,10 +112,16 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}

// Block relay is disabled during IBD
// Block relay is disabled only if the node is in IBD AND considered out of sync
if flow.IsIBDRunning() {
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
isNearlySynced, err := flow.IsNearlySynced()
if err != nil {
return err
}
if !isNearlySynced {
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
continue
}
}

log.Debugf("Requesting block %s", inv.Hash)

@@ -131,12 +144,36 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}

// Note we do not apply the heuristic below if the inv was queued as an orphan root, since
// that means the process was started by a proper and relevant relay block
if !inv.IsOrphanRoot {
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
if err != nil {
return err
}
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
if err != nil {
return err
}
// Since `BlueWork` respects topology, this condition means that the relay
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
// other valid blocks kosherize it, in which case it will be obtained once the merger is relayed
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
continue
}
}
}
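// Editor's note (reasoning sketch): blue work never decreases from a block to any
// block in its future, so header.BlueWork() <= mergeDepthRootHeader.BlueWork()
// implies the relayed block cannot be a descendant of the merge depth root and
// therefore cannot currently be merged under virtual.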

log.Debugf("Processing block %s", inv.Hash)
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
if err != nil {
return err
}
missingParents, virtualChangeSet, err := flow.processBlock(block)
missingParents, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)

@@ -168,15 +205,20 @@ func (flow *handleRelayInvsFlow) start() error {
return err
}

virtualHasNewParents := false
for _, parent := range newVirtualInfo.ParentHashes {
if oldVirtualParents.Contains(parent) {
continue
}

block, err := flow.Domain().Consensus().GetBlock(parent)
virtualHasNewParents = true
block, found, err := flow.Domain().Consensus().GetBlock(parent)
if err != nil {
return err
}

if !found {
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
}
blockHash := consensushashing.BlockHash(block)
log.Debugf("Relaying block %s", blockHash)
err = flow.relayBlock(block)

@@ -185,8 +227,16 @@ func (flow *handleRelayInvsFlow) start() error {
}
}

if virtualHasNewParents {
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
}

log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, virtualChangeSet)
err = flow.OnNewBlock(block)
if err != nil {
return err
}

@@ -202,24 +252,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
return nil
}

func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
var inv invRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}

msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
return invRelayBlock{}, err
}

inv, ok := msg.(*appmessage.MsgInvRelayBlock)
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
}

func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {

@@ -264,7 +314,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,

switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
case *appmessage.MsgBlock:
return message, nil
default:

@@ -273,22 +323,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
}
}

func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
blockHash := consensushashing.BlockHash(block)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
}

missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil, nil
return missingParentsError.MissingParentHashes, nil
}
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
// A duplicate block should not appear to the user as a warning, and is already reported in the calling function
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
}
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, virtualChangeSet, nil
return nil, nil
}

func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {

@@ -396,12 +449,16 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}

if len(orphanRoots) == 0 {
// In some rare cases we get here when there are already no orphan roots
return nil
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))

invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
invMessages := make([]invRelayBlock, len(orphanRoots))
for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = appmessage.NewMsgInvBlock(root)
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
}

flow.invsQueue = append(invMessages, flow.invsQueue...)
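// Editor's note: prepending (rather than appending) the orphan roots makes the
// flow fetch missing ancestors before any newer invs, so the orphan chain is
// resolved bottom-up; the IsOrphanRoot flag later exempts these invs from the
// merge-depth blue-work heuristic above.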

95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
@@ -0,0 +1,95 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sort"
)

// RequestAnticoneContext is the interface for the context needed for the HandleRequestAnticone flow.
type RequestAnticoneContext interface {
Domain() domain.Domain
Config() *config.Config
}

type handleRequestAnticoneFlow struct {
RequestAnticoneContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}

// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {

flow := &handleRequestAnticoneFlow{
RequestAnticoneContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}

func (flow *handleRequestAnticoneFlow) start() error {
for {
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)

// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected by the past of the relayed block, and is thus expected to be bounded by the mergeset limit, since
// we relay blocks only if they enter virtual's mergeset. We add a factor of 2 for possible sync gaps.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone")
}
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)

blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}

// We sort the headers in bottom-up topological order before sending
sort.Slice(blockHeaders, func(i, j int) bool {
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
})

blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}

err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}

func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
contextHash *externalapi.DomainHash, err error) {

message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)

return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}
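// Editor's note (reasoning sketch): since BlueWork respects topology — a header
// in another's future carries strictly greater blue work — sorting ascending by
// BlueWork guarantees every parent precedes its descendants, i.e. a bottom-up
// topological order, without walking the DAG.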

@@ -10,7 +10,9 @@ import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

const ibdBatchSize = router.DefaultMaxMessages
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
const ibdBatchSize = 99

// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {

@@ -42,7 +44,34 @@ func (flow *handleRequestHeadersFlow) start() error {
if err != nil {
return err
}
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

consensus := flow.Domain().Consensus()

lowHashInfo, err := consensus.GetBlockInfo(lowHash)
if err != nil {
return err
}
if !lowHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
}

highHashInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
}

isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
if err != nil {
return err
}
if !isLowSelectedAncestorOfHigh {
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
lowHash, highHash)
}

for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)

@@ -51,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}

@@ -59,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {

blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
blockHeader, err := consensus.GetBlockHeader(blockHash)
if err != nil {
return err
}

@@ -1,12 +1,12 @@
package blockrelay

import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

@@ -21,8 +21,8 @@ import (
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnNewBlock(block *externalapi.DomainBlock) error
OnNewBlockTemplate() error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool

@@ -71,22 +71,25 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
}

isFinishedSuccessfully := false
var err error
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
flow.logIBDFinished(isFinishedSuccessfully, err)
}()

highHash := consensushashing.BlockHash(block)
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Debugf("Syncing blocks up to %s", highHash)
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
relayBlockHash := consensushashing.BlockHash(block)

log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
log.Infof("Syncing blocks up to %s", relayBlockHash)
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)

syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
block, highestKnownSyncerChainHash)
if err != nil {
return err
}

@@ -97,7 +100,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er

if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}

@@ -110,27 +113,184 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er

if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", highHash)
"to the recent pruning point before normal operation can resume.", relayBlockHash)
return nil
}
}

err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
err = flow.syncPruningPointFutureHeaders(
flow.Domain().Consensus(),
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
if err != nil {
return err
}
}

err = flow.syncMissingBlockBodies(highHash)
// We start by syncing missing bodies over the syncer selected chain
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
if err != nil {
return err
}
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
if err != nil {
return err
}
// The relay block might be in the anticone of the syncer selected tip, thus
// check its chain for missing bodies as well.
// Note: this operation can be slightly optimized to avoid the full chain search, since the relay block
// is in the syncer's virtual mergeset, which has bounded size.
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
err = flow.syncMissingBlockBodies(relayBlockHash)
if err != nil {
return err
}
}

log.Debugf("Finished syncing blocks up to %s", highHash)
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
isFinishedSuccessfully = true
return nil
}

func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
/*
Algorithm:
Request full selected chain block locator from syncer
Find the highest block which we know
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
*/

// Empty hashes indicate that the full chain is queried
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
syncerHeaderSelectedTipHash := locatorHashes[0]
var highestKnownSyncerChainHash *externalapi.DomainHash
chainNegotiationRestartCounter := 0
chainNegotiationZoomCounts := 0
initialLocatorLen := len(locatorHashes)
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return nil, nil, err
}

for {
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
for _, syncerChainHash := range locatorHashes {
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
if err != nil {
return nil, nil, err
}
if info.Exists {
if info.BlockStatus == externalapi.StatusInvalid {
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
}

isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
if err != nil {
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
}

// We're only interested in syncer chain blocks that have our pruning
// point in their selected chain. Otherwise, it means one of the following:
// 1) We will not switch the virtual selected chain to the syncer's chain since it will violate finality
// (hence we can ignore it unless merged by others).
// 2) syncerChainHash is actually in the past of our pruning point, so there's no
// point in syncing from it.
if err == nil && isPruningPointOnSyncerChain {
currentHighestKnownSyncerChainHash = syncerChainHash
break
}
}
lowestUnknownSyncerChainHash = syncerChainHash
}
// No unknown blocks, break. Note this can only happen in the first iteration
if lowestUnknownSyncerChainHash == nil {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// No shared block, break
if currentHighestKnownSyncerChainHash == nil {
highestKnownSyncerChainHash = nil
break
}
// No point in zooming further
if len(locatorHashes) == 1 {
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}
// Zoom in
locatorHashes, err = flow.getSyncerChainBlockLocator(
lowestUnknownSyncerChainHash,
currentHighestKnownSyncerChainHash, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) > 0 {
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
"hashes to match the locator bounds")
}

chainNegotiationZoomCounts++
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

if len(locatorHashes) == 2 {
// We found our search target
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
break
}

if chainNegotiationZoomCounts > initialLocatorLen*2 {
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
// expected to decrease in size at least every two iterations
return nil, nil, protocolerrors.Errorf(true,
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
chainNegotiationZoomCounts, initialLocatorLen)
}

} else { // Empty locator signals a restart due to chain changes
chainNegotiationZoomCounts = 0
chainNegotiationRestartCounter++
if chainNegotiationRestartCounter > 32 {
return nil, nil, protocolerrors.Errorf(false,
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
}
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)

// An empty locator signals that the syncer chain was modified and no longer contains one of
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
if err != nil {
return nil, nil, err
}
if len(locatorHashes) == 0 {
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
"to contain at least one element")
}
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])

initialLocatorLen = len(locatorHashes)
// Reset syncer's header selected tip
syncerHeaderSelectedTipHash = locatorHashes[0]
}
}

log.Infof("Found highest known syncer chain block %s from peer %s",
highestKnownSyncerChainHash, flow.peer)

return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
}
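// Editor's worked example of the negotiation above (illustrative): the initial
// locator is exponentially spaced, e.g. [tip, h1, h2, h4, h8, ..., pruningPoint].
// Scanning from the tip down, suppose h4 is the first hash we know (with our
// pruning point on its selected chain); then h2 is the lowest unknown entry, and
// we request a new locator over the segment (h2 .. h4) and scan it the same way.
// Each zoom-in narrows the gap between two formerly adjacent locator entries,
// until the locator holds exactly two consecutive chain blocks — the lower one is
// max(past(syncee) ∩ chain(syncer)). The code caps zoom-ins at twice the initial
// locator length and full restarts (empty locator) at 32.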
|
||||
|
||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
@ -140,146 +300,66 @@ func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
|
||||
successString := "successfully"
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
if err != nil {
|
||||
successString = fmt.Sprintf("(interrupted: %s)", err)
|
||||
} else {
|
||||
successString = fmt.Sprintf("(interrupted)")
|
||||
}
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||
}
|
||||
|
||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) findHighestSharedBlockHash(
|
||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
||||
"restarting with full block locator")
|
||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return blockLocator, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
||||
// blockLocator. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
||||
highestHash := message.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, true, nil
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
||||
return nil, false, nil
|
||||
case *appmessage.MsgIBDChainBlockLocator:
|
||||
if len(message.BlockLocatorHashes) > 64 {
|
||||
return nil, protocolerrors.Errorf(true,
|
||||
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||
len(message.BlockLocatorHashes))
|
||||
}
|
||||
return message.BlockLocatorHashes, nil
|
||||
default:
|
||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
-func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
-    highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
+func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
+    syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
+    highBlockDAAScoreHint uint64) error {

    log.Infof("Downloading headers from %s", flow.peer)

-    err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
+    if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
+        // No need to get syncer selected tip headers, so sync relay past and return
+        return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
+    }
+
+    err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
    if err != nil {
        return err
    }

-    highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
+    highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
    if err != nil {
        return err
    }
-    progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
+    progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")

    // Keep a short queue of BlockHeadersMessages so that there's
    // never a moment when the node is not validating and inserting
@@ -297,6 +377,11 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
                close(blockHeadersMessageChan)
                return
            }
+            if len(blockHeadersMessage.BlockHeaders) == 0 {
+                // The syncer should have sent a done message if the search completed, and not an empty list
+                errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
+                return
+            }

            blockHeadersMessageChan <- blockHeadersMessage

@@ -312,16 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
        select {
        case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
            if !ok {
-                // If the highHash has not been received, the peer is misbehaving
-                highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
-                if err != nil {
-                    return err
-                }
-                if !highHashBlockInfo.Exists {
-                    return protocolerrors.Errorf(true, "did not receive "+
-                        "highHash block %s from peer %s during block download", highHash, flow.peer)
-                }
-                return nil
+                return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
            }
            for _, header := range ibdBlocksMessage.BlockHeaders {
                err = flow.processHeader(consensus, header)
@@ -338,11 +414,70 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
        }
    }
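The "short queue" the comment above refers to can be pictured as a producer goroutine feeding a small buffered channel while the main loop validates, so neither the network nor validation ever stalls the other. The snippet below is a generic illustration of that shape, not the kaspad implementation; all names are made up.

```go
package main

import "fmt"

type headersBatch struct{ headers []string }

// downloadPipelined keeps up to 2 batches in flight: validation never
// waits on the network, and the network never runs far ahead of validation.
func downloadPipelined(fetch func(i int) (headersBatch, bool)) {
	batchChan := make(chan headersBatch, 2) // the "short queue"
	go func() {
		defer close(batchChan)
		for i := 0; ; i++ {
			batch, done := fetch(i)
			if done {
				return
			}
			batchChan <- batch
		}
	}()
	// Validate and insert while the next batch downloads concurrently.
	for batch := range batchChan {
		fmt.Println("processing", len(batch.headers), "headers")
	}
}

func main() {
	downloadPipelined(func(i int) (headersBatch, bool) {
		return headersBatch{headers: make([]string, 10)}, i >= 3
	})
}
```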
-func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
-    peerSelectedTipHash *externalapi.DomainHash) error {
+func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
+    // Finished downloading syncer selected tip blocks,
+    // check if we already have the triggering relayBlockHash
+    relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
+    if err != nil {
+        return err
+    }
+    if !relayBlockInfo.Exists {
+        // Send a special header request for the selected tip anticone. This is expected to
+        // be a small set, as it is bounded to the size of virtual's mergeset.
+        err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
+        if err != nil {
+            return err
+        }
+        anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
+        if err != nil {
+            return err
+        }
+        if anticoneDone {
+            return protocolerrors.Errorf(true,
+                "Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
+                relayBlockHash, syncerHeaderSelectedTipHash)
+        }
+        _, anticoneDone, err = flow.receiveHeaders()
+        if err != nil {
+            return err
+        }
+        if !anticoneDone {
+            return protocolerrors.Errorf(true,
+                "Expected only one anticone header chunk for past(%s) cap anticone(%s)",
+                relayBlockHash, syncerHeaderSelectedTipHash)
+        }
+        for _, header := range anticoneHeadersMessage.BlockHeaders {
+            err = flow.processHeader(consensus, header)
+            if err != nil {
+                return err
+            }
+        }
+    }

-    msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
-    return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
+    // If the relayBlockHash has still not been received, the peer is misbehaving
+    relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
+    if err != nil {
+        return err
+    }
+    if !relayBlockInfo.Exists {
+        return protocolerrors.Errorf(true, "did not receive "+
+            "relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
+    }
+    return nil
}

+func (flow *handleIBDFlow) sendRequestAnticone(
+    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
+
+    msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
+    return flow.outgoingRoute.Enqueue(msgRequestAnticone)
+}
+
+func (flow *handleIBDFlow) sendRequestHeaders(
+    highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
+
+    msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
+    return flow.outgoingRoute.Enqueue(msgRequestHeaders)
+}
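In the error strings above, "past(x) cap anticone(y)" reads as the set intersection of past(x) with anticone(y). The exchange itself enforces a strict contract: exactly one header chunk, then a "done" terminator. The generic shape of that contract, with hypothetical names standing in for the flow's receive machinery:

```go
package main

import (
	"errors"
	"fmt"
)

// receiveExactlyOneChunk models the contract enforced above: the peer must
// send one payload chunk, then a terminator - nothing less, nothing more.
func receiveExactlyOneChunk(recv func() (payload []byte, done bool, err error)) ([]byte, error) {
	chunk, done, err := recv()
	if err != nil {
		return nil, err
	}
	if done {
		return nil, errors.New("expected one chunk but got zero")
	}
	_, done, err = recv()
	if err != nil {
		return nil, err
	}
	if !done {
		return nil, errors.New("expected only one chunk")
	}
	return chunk, nil
}

func main() {
	msgs := []struct {
		payload []byte
		done    bool
	}{{[]byte("headers"), false}, {nil, true}}
	i := 0
	chunk, err := receiveExactlyOneChunk(func() ([]byte, bool, error) {
		m := msgs[i]
		i++
		return m.payload, m.done, nil
	})
	fmt.Println(string(chunk), err)
}
```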
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
@@ -381,7 +516,7 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
        log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
        return nil
    }
-    _, err = consensus.ValidateAndInsertBlock(block, false)
+    err = consensus.ValidateAndInsertBlock(block, false)
    if err != nil {
        if !errors.As(err, &ruleerrors.RuleError{}) {
            return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
@@ -459,7 +594,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(

    receivedChunkCount++
    if receivedChunkCount%ibdBatchSize == 0 {
-        log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
+        log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
            receivedChunkCount, receivedUTXOCount)

        requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
@@ -511,6 +646,12 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
    progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
    highestProcessedDAAScore := lowBlockHeader.DAAScore()

+    // If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
+    updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
+    if err != nil {
+        return err
+    }
+
    for offset := 0; offset < len(hashes); offset += ibdBatchSize {
        var hashesToRequest []*externalapi.DomainHash
        if offset+ibdBatchSize < len(hashes) {
@@ -547,7 +688,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
                return err
            }

-            virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
+            err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
            if err != nil {
                if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
                    log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@@ -555,7 +696,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
                }
                return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
            }
-            err = flow.OnNewBlock(block, virtualChangeSet)
+            err = flow.OnNewBlock(block)
            if err != nil {
                return err
            }
@@ -566,7 +707,15 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
        progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
    }

-    return flow.resolveVirtual(highestProcessedDAAScore)
+    // We need to resolve virtual only if it wasn't updated while syncing block bodies
+    if !updateVirtual {
+        err := flow.resolveVirtual(highestProcessedDAAScore)
+        if err != nil {
+            return err
+        }
+    }
+
+    return flow.OnNewBlockTemplate()
}
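The IsNearlySynced gate above trades throughput for simplicity: when only a handful of blocks are missing, paying the per-block virtual update is cheap and avoids a separate resolve step; far behind the tip, the node defers one big resolveVirtual until the batch completes. A schematic of the same decision, with hypothetical names:

```go
package main

import "fmt"

// syncBodies shows the shape of the logic above: near the tip we pay the
// per-block virtual update; far behind we defer one big resolveVirtual.
func syncBodies(nearlySynced bool, blocks []string) {
	for _, b := range blocks {
		insertBlock(b, nearlySynced) // updateVirtual == nearlySynced
	}
	if !nearlySynced {
		fmt.Println("resolving virtual once, after the batch")
	}
	fmt.Println("requesting a new block template")
}

func insertBlock(name string, updateVirtual bool) {
	fmt.Printf("inserted %s (updateVirtual=%v)\n", name, updateVirtual)
}

func main() {
	syncBodies(false, []string{"b1", "b2", "b3"})
}
```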
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
@@ -579,38 +728,24 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
}

func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
-    virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
+    err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
+        var percents int
+        if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
+            percents = 100
+        } else {
+            percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
+        }
+        if percents < 0 {
+            percents = 0
+        } else if percents > 100 {
+            percents = 100
+        }
+        log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
+    })
    if err != nil {
        return err
    }

-    for i := 0; ; i++ {
-        if i%10 == 0 {
-            virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
-            if err != nil {
-                return err
-            }
-            var percents int
-            if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
-                percents = 100
-            } else {
-                percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
-            }
-            log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
-        }
-        virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
-        if err != nil {
-            return err
-        }
-
-        err = flow.OnVirtualChange(virtualChangeSet)
-        if err != nil {
-            return err
-        }
-
-        if isCompletelyResolved {
-            log.Infof("Resolved virtual")
-            return nil
-        }
-    }
+    log.Infof("Resolved virtual")
+    return nil
}
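One subtlety in the progress callback above: `estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0` operates on uint64 values, so the subtraction can only be zero or wrap around to a huge positive number; the guard effectively fires only on exact equality, which is presumably why the result is also clamped to [0, 100] afterwards. Comparing before subtracting avoids the wrap entirely; a sketch (my own formulation, not the kaspad code):

```go
package main

import "fmt"

// progressPercent clamps like the callback above, but compares before
// subtracting so the uint64 differences can never wrap around.
func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100
	}
	if current <= start {
		return 0
	}
	p := int(float64(current-start) / float64(target-start) * 100)
	if p > 100 {
		p = 100
	}
	return p
}

func main() {
	fmt.Println(progressPercent(100, 150, 200)) // 50
	fmt.Println(progressPercent(200, 150, 100)) // 100: target below start
}
```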
@@ -10,6 +10,10 @@ type ibdProgressReporter struct {
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
+    if highDAAScore <= lowDAAScore {
+        // Avoid a zero or negative diff
+        highDAAScore = lowDAAScore + 1
+    }
    return &ibdProgressReporter{
        lowDAAScore:  lowDAAScore,
        highDAAScore: highDAAScore,
@@ -23,7 +27,16 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
    ipr.processed += processedDelta

-    relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
+    // Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
+    if highestProcessedDAAScore > ipr.highDAAScore {
+        ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
+        ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
+    }
+    relativeDAAScore := uint64(0)
+    if highestProcessedDAAScore > ipr.lowDAAScore {
+        // Avoid a negative diff
+        relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
+    }
    progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
    if progressPercent > ipr.lastReportedProgressPercent {
        log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
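To see both new guards in action, here is a trimmed, self-contained replica of the reporter. The log call is replaced by fmt and the struct is reconstructed from the hunks above, since its full definition sits outside this diff; treat it as an illustration, not the exact type.

```go
package main

import "fmt"

type ibdProgressReporter struct {
	lowDAAScore, highDAAScore, totalDAAScoreDifference uint64
	processed, lastReportedProgressPercent             int
	objectName                                         string
}

func newIBDProgressReporter(low, high uint64, objectName string) *ibdProgressReporter {
	if high <= low {
		high = low + 1 // avoid a zero or negative diff
	}
	return &ibdProgressReporter{
		lowDAAScore: low, highDAAScore: high,
		totalDAAScoreDifference: high - low,
		objectName:              objectName,
	}
}

func (ipr *ibdProgressReporter) reportProgress(delta int, highestProcessed uint64) {
	ipr.processed += delta
	if highestProcessed > ipr.highDAAScore {
		// The original high score was only a hint; stretch it so the
		// report stays at 99% instead of shooting past 100%.
		ipr.highDAAScore = highestProcessed + 1
		ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
	}
	relative := uint64(0)
	if highestProcessed > ipr.lowDAAScore {
		relative = highestProcessed - ipr.lowDAAScore
	}
	percent := int(float64(relative) / float64(ipr.totalDAAScoreDifference) * 100)
	if percent > ipr.lastReportedProgressPercent {
		fmt.Printf("IBD: Processed %d %s (%d%%)\n", ipr.processed, ipr.objectName, percent)
		ipr.lastReportedProgressPercent = percent
	}
}

func main() {
	r := newIBDProgressReporter(1000, 1000, "block headers") // degenerate range
	r.reportProgress(50, 1200)                               // beyond the hint: reports 99%
}
```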
@@ -12,18 +12,20 @@ import (
    "time"
)

-func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
-    err := flow.Domain().InitStagingConsensus()
+func (flow *handleIBDFlow) ibdWithHeadersProof(
+    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
+    err := flow.Domain().InitStagingConsensusWithoutGenesis()
    if err != nil {
        return err
    }

-    err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
+    err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
    if err != nil {
        if !flow.IsRecoverableError(err) {
            return err
        }

        log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
        deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
        if deleteStagingConsensusErr != nil {
            return deleteStagingConsensusErr
@@ -32,6 +34,8 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
        return err
    }

+    log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
+        "Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
    err = flow.Domain().CommitStagingConsensus()
    if err != nil {
        return err
@@ -45,11 +49,34 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
    return nil
}
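The flow above is essentially commit/rollback: IBD with a pruning proof builds headers into a disposable staging consensus, commits it only on full success, and deletes it on any recoverable failure. Schematically, with hypothetical interfaces standing in for the kaspad domain API:

```go
package main

import (
	"errors"
	"fmt"
)

type staging struct{ committed bool }

func (s *staging) commit() error { s.committed = true; return nil }
func (s *staging) delete() error { return nil }

// ibdWithProof sketches the commit/rollback shape of the flow above.
func ibdWithProof(download func(*staging) error) error {
	s := &staging{} // analogue of InitStagingConsensusWithoutGenesis
	if err := download(s); err != nil {
		// Roll back: a half-built staging consensus must not survive.
		if deleteErr := s.delete(); deleteErr != nil {
			return deleteErr
		}
		return err
	}
	return s.commit() // atomically replaces the previous consensus
}

func main() {
	err := ibdWithProof(func(*staging) error { return errors.New("peer disconnected") })
	fmt.Println("IBD result:", err)
}
```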
-func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
-    highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
+func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
+    relayBlock *externalapi.DomainBlock,
+    highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {

-    if !highestSharedBlockFound {
-        hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
+    var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
+    if highestKnownSyncerChainHash != nil {
+        blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
+        if err != nil {
+            return false, false, err
+        }
+
+        highestSharedBlockFound = blockInfo.HasBody()
+        pruningPoint, err := flow.Domain().Consensus().PruningPoint()
+        if err != nil {
+            return false, false, err
+        }
+
+        isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
+            pruningPoint, highestKnownSyncerChainHash)
+        if err != nil {
+            return false, false, err
+        }
+    }
+    // Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
+    // we might have here info which is relevant to finality conflict decisions. This should be taken into
+    // account when we improve this aspect.
+    if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
+        hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
        if err != nil {
            return false, false, err
        }
@@ -58,28 +85,33 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *ex
            return true, true, nil
        }

-        return false, false, nil
+        if highestKnownSyncerChainHash == nil {
+            log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
+            return false, false, nil
+        }
+
+        return false, true, nil
    }

    return false, true, nil
}
-func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
-    headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
+func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
+    virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
    if err != nil {
        return false, err
    }

-    headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
+    virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
    if err != nil {
        return false, err
    }

-    if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
+    if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
        return false, nil
    }

-    return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
+    return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
}

func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
@@ -114,7 +146,10 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
    return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
-func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
+func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
+    syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
+    highBlockDAAScore uint64) error {

    proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
    if err != nil {
        return err
@@ -131,19 +166,20 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalap
        return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
    }

-    err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
+    err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
+        syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
    if err != nil {
        return err
    }

    log.Infof("Headers downloaded from peer %s", flow.peer)

-    highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
+    relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
    if err != nil {
        return err
    }

-    if !highHashInfo.Exists {
+    if !relayBlockInfo.Exists {
        return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
    }

@@ -206,7 +242,8 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
        return err
    }

-    for {
+    i := 0
+    for ; ; i++ {
        blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
        if err != nil {
            return err
@@ -220,9 +257,19 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
        if err != nil {
            return err
        }
+
+        // We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
+        // the pruning point outside the loop so we use i+2 instead of i+1.
+        if (i+2)%ibdBatchSize == 0 {
+            log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
+            err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
+            if err != nil {
+                return err
+            }
+        }
    }

-    log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
+    log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
    return nil
}
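The `(i+2)%ibdBatchSize` test above is easy to misread: i counts anticone blocks only, while the pruning point itself was already downloaded before the loop, so the request for the next batch must fire one iteration earlier than `(i+1)` would suggest. A toy model of the arithmetic (the real ibdBatchSize in kaspad differs; 4 is chosen here purely for readable output):

```go
package main

import "fmt"

const ibdBatchSize = 4 // illustrative only; not the real kaspad constant

func main() {
	// Block 0 (the pruning point) was fetched before the loop, so after
	// anticone block i we hold i+2 blocks total; when that total fills a
	// batch, ask the peer for the next one.
	for i := 0; i < 10; i++ {
		if (i+2)%ibdBatchSize == 0 {
			fmt.Printf("after anticone block %d (total %d): request next batch\n", i, i+2)
		}
	}
}
```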
@@ -243,8 +290,14 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
        blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
    }

-    _, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
-    return err
+    err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
+    if err != nil {
+        if errors.As(err, &ruleerrors.RuleError{}) {
+            return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
+        }
+        return err
+    }
+    return nil
}

func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
@@ -344,6 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
    log.Info("Fetching the pruning point UTXO set")
    isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
    if err != nil {
+        log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
        return false, err
    }

@@ -2,6 +2,8 @@ package ping

import (
    "github.com/kaspanet/kaspad/app/protocol/common"
+    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
+    "github.com/pkg/errors"
    "time"

    "github.com/kaspanet/kaspad/app/appmessage"
@@ -61,6 +63,9 @@ func (flow *sendPingsFlow) start() error {

        message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
        if err != nil {
+            if errors.Is(err, router.ErrTimeout) {
+                return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
+            }
            return err
        }
        pongMessage := message.(*appmessage.MsgPong)
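Wrapping the router's timeout in flowcontext.ErrPingTimeout lets callers test for "the peer failed a ping" with errors.Is, without caring which layer timed out. The same pattern in isolation, with stand-in sentinels (the two error variables below are hypothetical; only the github.com/pkg/errors calls are real):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errTimeout = errors.New("timeout expired")    // stands in for router.ErrTimeout
var errPingTimeout = errors.New("ping timed out") // stands in for flowcontext.ErrPingTimeout

func dequeue() error { return errors.Wrap(errTimeout, "dequeue failed") }

func ping() error {
	if err := dequeue(); err != nil {
		if errors.Is(err, errTimeout) {
			// Re-brand the low-level timeout as a protocol-level one.
			return errors.Wrapf(errPingTimeout, "%s", err.Error())
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(errors.Is(ping(), errPingTimeout)) // true
}
```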
@@ -1,14 +1,14 @@
-package v4
+package v5

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/common"
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
    peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
    routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -78,6 +78,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
            appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
            appmessage.CmdPruningPointProof,
            appmessage.CmdTrustedData,
+            appmessage.CmdIBDChainBlockLocator,
        },
        isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
            return blockrelay.HandleIBD(m.Context(), incomingRoute,
@@ -121,7 +122,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
    ),

    m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
-        []appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
+        []appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
        func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
            return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
        },
@@ -134,6 +135,20 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
        },
    ),

+    m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
+        []appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
+        func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
+            return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
+        },
+    ),
+
+    m.RegisterFlow("HandleRequestAnticone", router,
+        []appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
+        func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
+            return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
+        },
+    ),
+
    m.RegisterFlow("HandlePruningPointProofRequests", router,
        []appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
        func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
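Protocol v5 thus adds two flows (HandleRequestIBDChainBlockLocator and HandleRequestAnticone) and teaches an existing one a new command. A minimal model of version-keyed flow registration, with hypothetical names mirroring the structure above:

```go
package main

import "fmt"

type flowSet []string

var registry = map[uint32]func() flowSet{
	4: func() flowSet {
		return flowSet{"HandleIBD", "HandlePruningPointAndItsAnticoneRequests"}
	},
	5: func() flowSet {
		return flowSet{
			"HandleIBD",
			"HandlePruningPointAndItsAnticoneRequests", // now also serves CmdRequestNextPruningPointAndItsAnticoneBlocks
			"HandleRequestIBDChainBlockLocator",        // new in v5
			"HandleRequestAnticone",                    // new in v5
		}
	},
}

func register(version uint32) (flowSet, error) {
	build, ok := registry[version]
	if !ok {
		return nil, fmt.Errorf("no way to handle protocol version %d", version)
	}
	return build(), nil
}

func main() {
	flows, _ := register(5)
	fmt.Println(flows)
}
```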
@@ -1,7 +1,7 @@
package testing

import (
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
    "testing"
    "time"

@@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
    SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
    OnTransactionAddedToMempool()
    EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
-    IsIBDRunning() bool
+    IsNearlySynced() (bool, error)
}

type handleRelayedTransactionsFlow struct {
@@ -50,7 +50,12 @@ func (flow *handleRelayedTransactionsFlow) start() error {
            return err
        }

-        if flow.IsIBDRunning() {
+        isNearlySynced, err := flow.IsNearlySynced()
+        if err != nil {
+            return err
+        }
+        // Transaction relay is disabled if the node is out of sync and thus not mining
+        if !isNearlySynced {
            continue
        }

@@ -97,7 +102,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
    // Ask the transaction memory pool if the transaction is known
    // to it in any form (main pool or orphan).
-    if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
+    if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
        return true
    }

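Replacing the binary IsIBDRunning check with IsNearlySynced changes the relay policy from "drop transactions while IBD runs" to "relay as soon as the node is close enough to the tip that its block templates are useful". The gate in isolation, with a hypothetical predicate:

```go
package main

import "fmt"

// shouldRelay models the new gate: relay is enabled exactly when the node
// is near enough to the DAG tip to be mining on current data.
func shouldRelay(isNearlySynced func() (bool, error)) (bool, error) {
	nearlySynced, err := isNearlySynced()
	if err != nil {
		return false, err
	}
	// Transaction relay is disabled if the node is out of sync and thus not mining.
	return nearlySynced, nil
}

func main() {
	ok, _ := shouldRelay(func() (bool, error) { return true, nil })
	fmt.Println("relay enabled:", ok)
}
```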
@@ -3,7 +3,7 @@ package transactionrelay_test
import (
    "errors"
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
    "strings"
    "testing"

@@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}

-func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
-    return false
+func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
+    return true, nil
}

// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
@@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
        }

        for _, transactionID := range msgRequestTransactions.IDs {
-            tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
+            tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)

            if !ok {
                msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
@@ -40,7 +40,6 @@ func (flow *handleRequestedTransactionsFlow) start() error {
                }
                continue
            }

            err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
            if err != nil {
                return err
@@ -2,7 +2,7 @@ package transactionrelay_test

import (
    "github.com/kaspanet/kaspad/app/protocol/flowcontext"
-    "github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
    "testing"

    "github.com/kaspanet/kaspad/app/appmessage"
@@ -2,10 +2,11 @@ package protocol

import (
    "fmt"
+    "github.com/kaspanet/kaspad/app/protocol/common"
    "sync"
    "sync/atomic"

-    "github.com/kaspanet/kaspad/app/protocol/common"
-
    "github.com/pkg/errors"

    "github.com/kaspanet/kaspad/domain"
@@ -90,14 +91,9 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
    return <-errChan
}

-// SetOnVirtualChange sets the onVirtualChangeHandler handler
-func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
-    m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
-}
-
-// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
-func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
-    m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
+// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
+func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
+    m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
}

// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
@@ -110,12 +106,6 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
    m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
}

-// ShouldMine returns whether it's ok to use block template from this node
-// for mining purposes.
-func (m *Manager) ShouldMine() (bool, error) {
-    return m.context.ShouldMine()
-}
-
// IsIBDRunning returns true if IBD is currently marked as running
func (m *Manager) IsIBDRunning() bool {
    return m.context.IsIBDRunning()
@@ -3,7 +3,7 @@ package protocol
import (
    "github.com/kaspanet/kaspad/app/protocol/common"
    "github.com/kaspanet/kaspad/app/protocol/flows/ready"
-    v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
+    "github.com/kaspanet/kaspad/app/protocol/flows/v5"
    "sync"
    "sync/atomic"

@@ -23,7 +23,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
    // errChan is used by the flow goroutines to return to runFlows when an error occurs.
    // They are both initialized here and passed to register flows.
    isStopping := uint32(0)
-    errChan := make(chan error)
+    errChan := make(chan error, 1)

    receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)

@@ -76,8 +76,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
    var flows []*common.Flow
    log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
    switch peer.ProtocolVersion() {
-    case 4:
-        flows = v4.Register(m, router, errChan, &isStopping)
+    case 5:
+        flows = v5.Register(m, router, errChan, &isStopping)
    default:
        panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
    }
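Growing errChan from unbuffered to capacity 1 matters for shutdown: a flow goroutine that errors after the reader has already returned would block forever on an unbuffered send and leak. With a one-slot buffer the send completes and the goroutine exits. A compact demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	errChan := make(chan error, 1) // capacity 1: a late error never blocks the sender

	go func() {
		// Simulate a flow that fails after the reader has gone away.
		errChan <- fmt.Errorf("connection reset")
		fmt.Println("flow goroutine exited cleanly")
	}()

	// No one ever reads errChan here; with an unbuffered channel the
	// goroutine above would be stuck on the send forever.
	time.Sleep(100 * time.Millisecond)
}
```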
@@ -12,6 +12,7 @@ import (
    "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
    "github.com/kaspanet/kaspad/infrastructure/network/connmanager"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter"
+    "github.com/pkg/errors"
)

// Manager is an RPC manager
@@ -28,6 +29,7 @@ func NewManager(
    connectionManager *connmanager.ConnectionManager,
    addressManager *addressmanager.AddressManager,
    utxoIndex *utxoindex.UTXOIndex,
+    consensusEventsChan chan externalapi.ConsensusEvent,
    shutDownChan chan<- struct{}) *Manager {

    manager := Manager{
@@ -44,50 +46,90 @@ func NewManager(
    }
    netAdapter.SetRPCRouterInitializer(manager.routerInitializer)

+    manager.initConsensusEventsHandler(consensusEventsChan)
+
    return &manager
}
-// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
-func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
-    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
+func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) {
+    spawn("consensusEventsHandler", func() {
+        for {
+            consensusEvent, ok := <-consensusEventsChan
+            if !ok {
+                return
+            }
+            switch event := consensusEvent.(type) {
+            case *externalapi.VirtualChangeSet:
+                err := m.notifyVirtualChange(event)
+                if err != nil {
+                    panic(err)
+                }
+            case *externalapi.BlockAdded:
+                err := m.notifyBlockAddedToDAG(event.Block)
+                if err != nil {
+                    panic(err)
+                }
+            default:
+                panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
+            }
+        }
+    })
+}
+
+// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
+func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
+    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
    defer onEnd()

-    err := m.NotifyVirtualChange(virtualChangeSet)
-    if err != nil {
-        return err
+    // Before converting the block and populating it, we check if any listeners are interested.
+    // This is done since most nodes do not use this event.
+    if !m.context.NotificationManager.HasBlockAddedListeners() {
+        return nil
    }

    rpcBlock := appmessage.DomainBlockToRPCBlock(block)
-    err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
+    err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true)
    if err != nil {
        return err
    }
    blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
-    return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
+    err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
+    if err != nil {
+        return err
+    }
+
+    return nil
}
-// NotifyVirtualChange notifies the manager that the virtual block has been changed.
-func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
-    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
+// notifyVirtualChange notifies the manager that the virtual block has been changed.
+func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
    defer onEnd()

-    if m.context.Config.UTXOIndex {
+    if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil {
        err := m.notifyUTXOsChanged(virtualChangeSet)
        if err != nil {
            return err
        }
    }

-    err := m.notifyVirtualSelectedParentBlueScoreChanged()
+    err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore)
    if err != nil {
        return err
    }

-    err = m.notifyVirtualDaaScoreChanged()
+    err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore)
    if err != nil {
        return err
    }

+    if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
+        (len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
+            len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
+
+        return nil
+    }
+
    err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
    if err != nil {
        return err
@@ -96,6 +138,13 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
    return nil
}
+// NotifyNewBlockTemplate notifies the manager that a new
+// block template is available for miners
+func (m *Manager) NotifyNewBlockTemplate() error {
+    notification := appmessage.NewNewBlockTemplateNotificationMessage()
+    return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
+}
+
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
// resets due to pruning point change via IBD.
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
@@ -138,6 +187,7 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
    if err != nil {
        return err
    }

    return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
}

@@ -153,33 +203,18 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
    return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
}

-func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
+func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error {
    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
    defer onEnd()

-    virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
-    if err != nil {
-        return err
-    }
-
-    blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
-    if err != nil {
-        return err
-    }
-
-    notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
+    notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore)
    return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
}

-func (m *Manager) notifyVirtualDaaScoreChanged() error {
+func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error {
    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
    defer onEnd()

-    virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
-    if err != nil {
-        return err
-    }
-
    notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
    return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
}
@@ -188,10 +223,16 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
    onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
    defer onEnd()

-    notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
-        virtualChangeSet.VirtualSelectedParentChainChanges)
-    if err != nil {
-        return err
+    hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
+
+    if hasListeners {
+        notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
+            virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
+        if err != nil {
+            return err
+        }
+        return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
    }
-    return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
+
+    return nil
}
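All of the notification changes above follow one rule: do the expensive conversion only after confirming someone is listening. HasBlockAddedListeners gates block population, and HasListenersThatPropagateVirtualSelectedParentChainChanged additionally reports whether any listener wants the costly accepted transaction IDs. The pattern, reduced to its core with made-up types:

```go
package main

import "fmt"

type listener struct{ wantsChainChanged, wantsAcceptedIDs bool }

func hasChainChangedListeners(listeners []listener) (hasAny, needAcceptedIDs bool) {
	for _, l := range listeners {
		if l.wantsChainChanged {
			hasAny = true
			if l.wantsAcceptedIDs { // the heavy part: compute IDs only on demand
				return true, true
			}
		}
	}
	return hasAny, false
}

func notify(listeners []listener) {
	hasAny, needIDs := hasChainChangedListeners(listeners)
	if !hasAny {
		return // skip building the notification entirely
	}
	fmt.Println("building notification, includeAcceptedTransactionIDs =", needIDs)
}

func main() {
	notify([]listener{{wantsChainChanged: true}})
}
```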
@@ -48,6 +48,9 @@ var handlers = map[appmessage.MessageCommand]handler{
    appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
    appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage:           rpchandlers.HandleEstimateNetworkHashesPerSecond,
    appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage:             rpchandlers.HandleNotifyVirtualDaaScoreChanged,
+    appmessage.CmdNotifyNewBlockTemplateRequestMessage:                   rpchandlers.HandleNotifyNewBlockTemplate,
+    appmessage.CmdGetCoinSupplyRequestMessage:                            rpchandlers.HandleGetCoinSupply,
+    appmessage.CmdGetMempoolEntriesByAddressesRequestMessage:             rpchandlers.HandleGetMempoolEntriesByAddresses,
}

func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
@@ -3,12 +3,14 @@ package rpccontext
import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)

// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
-    selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
+    selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) (
+    *appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {

    removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
    for i, removed := range selectedParentChainChanges.Removed {
@@ -20,5 +22,58 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
        addedChainBlocks[i] = added.String()
    }

-    return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
+    var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs
+    if includeAcceptedTransactionIDs {
+        var err error
+        acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
+        removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
}

+func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
+    []*appmessage.AcceptedTransactionIDs, error) {
+
+    acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
+
+    const chunk = 1000
+    position := 0
+
+    for position < len(selectedParentChainChanges.Added) {
+        var chainBlocksChunk []*externalapi.DomainHash
+        if position+chunk > len(selectedParentChainChanges.Added) {
+            chainBlocksChunk = selectedParentChainChanges.Added[position:]
+        } else {
+            chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
+        }
+        // We use chunks in order to avoid blocking consensus for too long
+        chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
+        if err != nil {
+            return nil, err
+        }
+
+        for i, addedChainBlock := range chainBlocksChunk {
+            chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
+            acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
+                AcceptingBlockHash:     addedChainBlock.String(),
+                AcceptedTransactionIDs: nil,
+            }
+            for _, blockAcceptanceData := range chainBlockAcceptanceData {
+                for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
+                    if transactionAcceptanceData.IsAccepted {
+                        acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
+                            append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
+                                consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
+                    }
+                }
+            }
+        }
+        position += chunk
+    }
+
+    return acceptedTransactionIDs, nil
+}
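getAndConvertAcceptedTransactionIDs walks the added chain blocks in chunks of 1000 so each consensus call holds the consensus lock only briefly. The chunking skeleton on its own, stripped of the domain types:

```go
package main

import "fmt"

// forEachChunk is the iteration skeleton used above: slice
// [position, position+chunk) with a shorter final tail, so no index is
// visited twice or skipped.
func forEachChunk(items []int, chunk int, visit func(chunk []int)) {
	for position := 0; position < len(items); position += chunk {
		end := position + chunk
		if end > len(items) {
			end = len(items)
		}
		visit(items[position:end])
	}
}

func main() {
	items := make([]int, 2500)
	forEachChunk(items, 1000, func(c []int) {
		fmt.Println("processing", len(c), "chain blocks") // 1000, 1000, 500
	})
}
```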
@@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
        UTXOIndex:    utxoIndex,
        ShutDownChan: shutDownChan,
    }
-    context.NotificationManager = NewNotificationManager()
+    context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams)

    return context
}
@@ -3,6 +3,11 @@ package rpccontext
import (
    "sync"

+    "github.com/kaspanet/kaspad/domain/dagconfig"
+
+    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+    "github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
+
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/domain/utxoindex"
    routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -13,6 +18,7 @@ import (
type NotificationManager struct {
    sync.RWMutex
    listeners map[*routerpkg.Router]*NotificationListener
+    params    *dagconfig.Params
}

// UTXOsChangedNotificationAddress represents a kaspad address.
@@ -24,6 +30,8 @@ type UTXOsChangedNotificationAddress struct {

// NotificationListener represents a registered RPC notification listener
type NotificationListener struct {
+    params *dagconfig.Params
+
    propagateBlockAddedNotifications                            bool
    propagateVirtualSelectedParentChainChangedNotifications     bool
    propagateFinalityConflictNotifications                      bool
@@ -32,13 +40,16 @@ type NotificationListener struct {
    propagateVirtualSelectedParentBlueScoreChangedNotifications bool
    propagateVirtualDaaScoreChangedNotifications                bool
    propagatePruningPointUTXOSetOverrideNotifications           bool
+    propagateNewBlockTemplateNotifications                      bool

-    propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
+    propagateUTXOsChangedNotificationAddresses                                    map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
+    includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
}

// NewNotificationManager creates a new NotificationManager
-func NewNotificationManager() *NotificationManager {
+func NewNotificationManager(params *dagconfig.Params) *NotificationManager {
    return &NotificationManager{
+        params:    params,
        listeners: make(map[*routerpkg.Router]*NotificationListener),
    }
}
@@ -48,7 +59,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
    nm.Lock()
    defer nm.Unlock()

-    listener := newNotificationListener()
+    listener := newNotificationListener(nm.params)
    nm.listeners[router] = listener
}

@@ -72,6 +83,19 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
    return listener, nil
}
+// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
+func (nm *NotificationManager) HasBlockAddedListeners() bool {
+    nm.RLock()
+    defer nm.RUnlock()
+
+    for _, listener := range nm.listeners {
+        if listener.propagateBlockAddedNotifications {
+            return true
+        }
+    }
+    return false
+}
+
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
    nm.RLock()
@@ -79,10 +103,8 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd

    for router, listener := range nm.listeners {
        if listener.propagateBlockAddedNotifications {
-            err := router.OutgoingRoute().Enqueue(notification)
-            if errors.Is(err, routerpkg.ErrRouteClosed) {
-                log.Warnf("Couldn't send notification: %s", err)
-            } else if err != nil {
+            err := router.OutgoingRoute().MaybeEnqueue(notification)
+            if err != nil {
                return err
            }
        }
@@ -91,13 +113,27 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
}

// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
-func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
+func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
+    notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
+
    nm.RLock()
    defer nm.RUnlock()

+    notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
+        RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
+        AddedChainBlockHashes:   notification.AddedChainBlockHashes,
+    }
+
    for router, listener := range nm.listeners {
        if listener.propagateVirtualSelectedParentChainChangedNotifications {
-            err := router.OutgoingRoute().Enqueue(notification)
+            var err error
+
+            if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
+                err = router.OutgoingRoute().MaybeEnqueue(notification)
+            } else {
+                err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
+            }
+
            if err != nil {
                return err
            }
@@ -106,6 +142,31 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
    return nil
}
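The switch from Enqueue to MaybeEnqueue in these broadcast paths reflects that RPC listeners are best-effort: judging by the removed ErrRouteClosed special case above, MaybeEnqueue appears to absorb the cases where a listener's route has vanished or is saturated, instead of failing the whole broadcast. A generic best-effort enqueue looks like this; it is my own sketch over plain channels, not the kaspad router implementation:

```go
package main

import "fmt"

// maybeEnqueue drops the message instead of blocking or erroring when the
// subscriber's buffer is full or its channel is closed.
func maybeEnqueue(ch chan string, msg string) (delivered bool) {
	defer func() {
		if recover() != nil {
			delivered = false // channel closed: treat as a vanished listener
		}
	}()
	select {
	case ch <- msg:
		return true
	default:
		return false // full buffer: drop rather than stall other listeners
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(maybeEnqueue(ch, "notification A")) // true
	fmt.Println(maybeEnqueue(ch, "notification B")) // false: buffer already full
}
```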
+// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
+// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
+// to include AcceptedTransactionIDs.
+func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
+
+    nm.RLock()
+    defer nm.RUnlock()
+
+    hasListeners = false
+    hasListenersThatRequireAcceptedTransactionIDs = false
+
+    for _, listener := range nm.listeners {
+        if listener.propagateVirtualSelectedParentChainChangedNotifications {
+            hasListeners = true
+            // Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
+            if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
+                hasListenersThatRequireAcceptedTransactionIDs = true
+                break
+            }
+        }
+    }
+
+    return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
+}
+
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
    nm.RLock()
@@ -146,7 +207,10 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
    for router, listener := range nm.listeners {
        if listener.propagateUTXOsChangedNotifications {
            // Filter utxoChanges and create a notification
-            notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
+            notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
+            if err != nil {
+                return err
+            }

            // Don't send the notification if it's empty
            if len(notification.Added) == 0 && len(notification.Removed) == 0 {
@@ -154,7 +218,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
            }

            // Enqueue the notification
-            err := router.OutgoingRoute().Enqueue(notification)
+            err = router.OutgoingRoute().MaybeEnqueue(notification)
            if err != nil {
                return err
            }
@@ -173,7 +237,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(

    for router, listener := range nm.listeners {
        if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
-            err := router.OutgoingRoute().Enqueue(notification)
+            err := router.OutgoingRoute().MaybeEnqueue(notification)
            if err != nil {
                return err
            }
@@ -192,6 +256,25 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(

    for router, listener := range nm.listeners {
        if listener.propagateVirtualDaaScoreChangedNotifications {
            err := router.OutgoingRoute().MaybeEnqueue(notification)
            if err != nil {
                return err
            }
        }
    }
    return nil
}
+// NotifyNewBlockTemplate notifies the notification manager that a new
+// block template is available for miners
+func (nm *NotificationManager) NotifyNewBlockTemplate(
+    notification *appmessage.NewBlockTemplateNotificationMessage) error {
+
+    nm.RLock()
+    defer nm.RUnlock()
+
+    for router, listener := range nm.listeners {
+        if listener.propagateNewBlockTemplateNotifications {
+            err := router.OutgoingRoute().Enqueue(notification)
+            if err != nil {
+                return err
@@ -218,18 +301,27 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
    return nil
}

-func newNotificationListener() *NotificationListener {
+func newNotificationListener(params *dagconfig.Params) *NotificationListener {
    return &NotificationListener{
+        params: params,
+
        propagateBlockAddedNotifications:                            false,
        propagateVirtualSelectedParentChainChangedNotifications:     false,
        propagateFinalityConflictNotifications:                      false,
        propagateFinalityConflictResolvedNotifications:              false,
        propagateUTXOsChangedNotifications:                          false,
        propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
+        propagateNewBlockTemplateNotifications:                      false,
        propagatePruningPointUTXOSetOverrideNotifications:           false,
    }
}

+// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
+// includes accepted transaction IDs in its virtual-selected-parent-chain-changed notifications
+func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
+    return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
+}
+
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
// to the remote listener
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
@@ -238,8 +330,9 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {

// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
// to the remote listener
-func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
+func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) {
    nl.propagateVirtualSelectedParentChainChangedNotifications = true
+    nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
}
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
@@ -258,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
-func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
+func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
+	// Apply a write-lock since the internal listener address map is modified
+	nm.Lock()
+	defer nm.Unlock()
+
	if !nl.propagateUTXOsChangedNotifications {
		nl.propagateUTXOsChangedNotifications = true
		nl.propagateUTXOsChangedNotificationAddresses =
@@ -273,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
-func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
+func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
+	// Apply a write-lock since the internal listener address map is modified
+	nm.Lock()
+	defer nm.Unlock()
+
	if !nl.propagateUTXOsChangedNotifications {
		return
	}
@@ -284,7 +385,7 @@ func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(address
}
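Both subscription methods above move from NotificationListener to NotificationManager so that the listener's address map is only ever mutated under the manager's write lock; the changelog below records this as "Add missing locks to notification listener modifications" (#2124). A generic sketch of the pattern, with hypothetical names:

package main

import "sync"

// manager owns the lock; the listener's address set is touched only while it is held.
type manager struct {
	sync.RWMutex
	addresses map[string]struct{}
}

// Subscribe mutates shared state, so it takes the write lock.
func (m *manager) Subscribe(addrs []string) {
	m.Lock()
	defer m.Unlock()
	for _, a := range addrs {
		m.addresses[a] = struct{}{} // duplicate addresses are naturally ignored
	}
}

// Notify only reads, so the cheaper read lock suffices.
func (m *manager) Notify(addr string) bool {
	m.RLock()
	defer m.RUnlock()
	_, subscribed := m.addresses[addr]
	return subscribed
}

func main() {
	m := &manager{addresses: make(map[string]struct{})}
	m.Subscribe([]string{"kaspa:qqexample"})
	_ = m.Notify("kaspa:qqexample")
}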
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
-	utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
+	utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) {

	// As an optimization, we iterate over the smaller set (O(n)) among the two below
	// and check existence over the larger set (O(1))
@@ -299,27 +400,64 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
				notification.Added = append(notification.Added, utxosByAddressesEntries...)
			}
		}
-		for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
+		for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
			if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
-				utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
+				utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
				notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
			}
		}
-	} else {
+	} else if addressesSize > 0 {
		for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
			listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
			if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
				utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
				notification.Added = append(notification.Added, utxosByAddressesEntries...)
			}
-			if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
-				utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
+			if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
+				utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
				notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
			}
		}
+	} else {
+		for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
+			addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
+			if err != nil {
+				return nil, err
+			}
+
+			utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
+			notification.Added = append(notification.Added, utxosByAddressesEntries...)
+		}
+		for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
+			addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
+			if err != nil {
+				return nil, err
+			}
+
+			utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPairs)
+			notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
+		}
	}

-	return notification
+	return notification, nil
}
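The comment at the top of convertUTXOChangesToUTXOsChangedNotification describes iterating over the smaller of the two sets while probing the larger one. A standalone sketch of that pattern with plain string maps (illustrative, not the utxoindex types):

package main

import "fmt"

// intersectKeys loops over the smaller map and probes the larger one,
// so the cost is O(min(len(a), len(b))) lookups instead of O(len(a)).
func intersectKeys(a, b map[string]struct{}) []string {
	small, large := a, b
	if len(b) < len(a) {
		small, large = b, a
	}
	var common []string
	for k := range small {
		if _, ok := large[k]; ok {
			common = append(common, k)
		}
	}
	return common
}

func main() {
	listenerScripts := map[string]struct{}{"scriptA": {}, "scriptB": {}}
	changedScripts := map[string]struct{}{"scriptB": {}, "scriptC": {}, "scriptD": {}}
	fmt.Println(intersectKeys(listenerScripts, changedScripts)) // [scriptB]
}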
+func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
+	scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
+
+	// ExtractScriptPubKeyAddress often returns an error when the script is of unknown type
+	scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
+	if err != nil {
+		return "", err
+	}
+
+	var addressString string
+	if scriptType == txscript.NonStandardTy {
+		addressString = ""
+	} else {
+		addressString = address.String()
+	}
+	return addressString, nil
+}
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
@@ -334,6 +472,12 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
	nl.propagateVirtualDaaScoreChangedNotifications = true
}

+// PropagateNewBlockTemplateNotifications instructs the listener to send
+// new block template notifications to the remote listener
+func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
+	nl.propagateNewBlockTemplateNotifications = true
+}
+
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
// to the remote listener.
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
@@ -32,22 +32,6 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
	return utxosByAddressesEntries
}

-// convertUTXOOutpointsToUTXOsByAddressesEntries converts
-// UTXOOutpoints to a slice of UTXOsByAddressesEntry
-func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
-	utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
-	for outpoint := range outpoints {
-		utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
-			Address: address,
-			Outpoint: &appmessage.RPCOutpoint{
-				TransactionID: outpoint.TransactionID.String(),
-				Index:         outpoint.Index,
-			},
-		})
-	}
-	return utxosByAddressesEntries
-}
-
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
// to UTXOsChangedNotificationAddresses
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
@@ -63,7 +47,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
		if err != nil {
			return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
		}
-		scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
+		scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
		addresses[i] = &UTXOsChangedNotificationAddress{
			Address:               addressString,
			ScriptPublicKeyString: scriptPublicKeyString,
@@ -81,10 +81,6 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
		block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
	}

-	if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-		return nil
-	}
-
	// Get the block if we didn't receive it previously
	if domainBlock == nil {
		domainBlock, err = ctx.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash)
@@ -93,6 +89,10 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
		}
	}

+	if len(domainBlock.Transactions) == 0 {
+		return nil
+	}
+
	transactionIDs := make([]string, len(domainBlock.Transactions))
	for i, transaction := range domainBlock.Transactions {
		transactionIDs[i] = consensushashing.TransactionID(transaction).String()
@@ -122,6 +122,7 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
	}

+	ctx.Domain.Consensus().PopulateMass(domainTransaction)

	transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
		TransactionID: consensushashing.TransactionID(domainTransaction).String(),
		Hash:          consensushashing.TransactionHash(domainTransaction).String(),
@@ -9,6 +9,14 @@ import (

// HandleAddPeer handles the respectively named RPC command
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
+		response := appmessage.NewAddPeerResponseMessage()
+		response.Error =
+			appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
	AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
	address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
	if err != nil {
@@ -9,6 +9,14 @@ import (

// HandleBan handles the respectively named RPC command
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
+		response := appmessage.NewBanResponseMessage()
+		response.Error =
+			appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
	banRequest := request.(*appmessage.BanRequestMessage)
	ip := net.ParseIP(banRequest.IP)
	if ip == nil {
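The same safe-mode guard recurs in AddPeer and Ban above and in Unban, ShutDown and ResolveFinalityConflict below. A hedged sketch of its shape as a helper (hypothetical; the real handlers inline the check because each command builds its own concrete response type):

package main

import (
	"fmt"
	"log"
)

type rpcError struct{ Message string }

// safeModeError returns a non-nil error when safeRPC is set, mirroring the
// inlined guard: log a warning, then refuse the state-changing command.
func safeModeError(safeRPC bool, command string) *rpcError {
	if !safeRPC {
		return nil
	}
	log.Printf("%s RPC command called while node in safe RPC mode -- ignoring.", command)
	return &rpcError{Message: fmt.Sprintf("%s RPC command called while node in safe RPC mode", command)}
}

func main() {
	if rpcErr := safeModeError(true, "Ban"); rpcErr != nil {
		fmt.Println(rpcErr.Message) // would be attached to the command's response
	}
}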
@@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
		}
	}

+	if context.Config.SafeRPC {
+		const windowSizeLimit = 10000
+		if windowSize > windowSizeLimit {
+			response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
+			response.Error =
+				appmessage.RPCErrorf(
+					"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
+					windowSize, windowSizeLimit)
+			return response, nil
+		}
+	}
+
+	if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
+		response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
+		response.Error =
+			appmessage.RPCErrorf(
+				"Requested window size %d is larger than pruning point depth %d",
+				windowSize, context.Config.ActiveNetParams.PruningDepth())
+		return response, nil
+	}
+
	networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
	if err != nil {
		response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
@@ -22,7 +22,7 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
	balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
	if err != nil {
		rpcError := &appmessage.RPCError{}
-		if !errors.As(err, rpcError) {
+		if !errors.As(err, &rpcError) {
			return nil, err
		}
		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
@@ -23,7 +23,7 @@ func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router,

	if err != nil {
		rpcError := &appmessage.RPCError{}
-		if !errors.As(err, rpcError) {
+		if !errors.As(err, &rpcError) {
			return nil, err
		}
		errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
@@ -4,6 +4,7 @@ import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
@@ -16,7 +17,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque

	payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
	if err != nil {
-		errorMessage := &appmessage.GetBlockResponseMessage{}
+		errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
		return errorMessage, nil
	}
@@ -26,18 +27,20 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
		return nil, err
	}

-	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version())}
+	coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}

-	templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
+	templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
	if err != nil {
		return nil, err
	}

+	if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
+		errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
+		errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
+		return errorMessage, nil
+	}
+
	rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)

-	isSynced, err := context.ProtocolManager.ShouldMine()
-	if err != nil {
-		return nil, err
-	}
-
-	return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
+	return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
}
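The ExtraData change above appends a miner-supplied string to the node version in the coinbase payload, and the new check rejects templates whose coinbase payload exceeds MaxCoinbasePayloadLength. A small sketch of the composition and check (the limit value here is hypothetical, for illustration only):

package main

import "fmt"

// buildExtraData mirrors the version + "/" + extraData composition above.
func buildExtraData(nodeVersion, minerExtraData string) []byte {
	return []byte(nodeVersion + "/" + minerExtraData)
}

func main() {
	const maxCoinbasePayloadLength = 204 // hypothetical limit, for illustration
	extra := buildExtraData("0.12.0", "my-pool/worker-7")
	if len(extra) > maxCoinbasePayloadLength {
		fmt.Println("Coinbase payload is above max length; shorten the extra data")
		return
	}
	fmt.Printf("coinbase extra data: %s (%d bytes)\n", extra, len(extra))
}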
@@ -37,7 +37,7 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
		return nil, err
	}

-	if !blockInfo.Exists {
+	if !blockInfo.HasHeader() {
		return &appmessage.GetBlocksResponseMessage{
			Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
		}, nil
@@ -23,6 +23,10 @@ type fakeDomain struct {
	testapi.TestConsensus
}

+func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent {
+	panic("implement me")
+}
+
func (d fakeDomain) DeleteStagingConsensus() error {
	panic("implement me")
}
@@ -31,7 +35,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
	panic("implement me")
}

-func (d fakeDomain) InitStagingConsensus() error {
+func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
	panic("implement me")
}
app/rpc/rpchandlers/get_coin_supply.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetCoinSupply handles the respectively named RPC command
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	if !context.Config.UTXOIndex {
		errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
		return errorMessage, nil
	}

	circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
	if err != nil {
		return nil, err
	}

	response := appmessage.NewGetCoinSupplyResponseMessage(
		constants.MaxSompi,
		circulatingSompiSupply,
	)

	return response, nil
}
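HandleGetCoinSupply returns constants.MaxSompi together with the UTXO index's circulating supply. Conceptually (an assumption for illustration; the real GetCirculatingSompiSupply is maintained incrementally by the utxoindex rather than recomputed per call), the circulating figure is the sum of the amounts of all live UTXO entries:

package main

import "fmt"

// utxoEntry stands in for a UTXO index entry; only the amount matters here.
type utxoEntry struct{ amountSompi uint64 }

// circulatingSompi sums all live UTXO amounts. Illustrative only: a real
// index keeps a running total updated on every add/remove instead.
func circulatingSompi(utxos []utxoEntry) uint64 {
	var total uint64
	for _, u := range utxos {
		total += u.amountSompi
	}
	return total
}

func main() {
	utxos := []utxoEntry{{amountSompi: 100_000_000}, {amountSompi: 50_000_000}}
	fmt.Println(circulatingSompi(utxos)) // 150000000 sompi, i.e. 1.5 KAS
}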
@@ -9,10 +9,17 @@ import (

// HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+	isNearlySynced, err := context.Domain.Consensus().IsNearlySynced()
+	if err != nil {
+		return nil, err
+	}
+
	response := appmessage.NewGetInfoResponseMessage(
		context.NetAdapter.ID().String(),
-		uint64(context.Domain.MiningManager().TransactionCount()),
+		uint64(context.Domain.MiningManager().TransactionCount(true, false)),
		version.Version(),
		context.Config.UTXOIndex,
+		context.ProtocolManager.Context().HasPeers() && isNearlySynced,
	)

	return response, nil
@@ -7,19 +7,40 @@ import (
)

// HandleGetMempoolEntries handles the respectively named RPC command
-func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
-	transactions := context.Domain.MiningManager().AllTransactions()
-	entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
-	for _, transaction := range transactions {
-		rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
-		err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
-		if err != nil {
-			return nil, err
-		}
-		entries = append(entries, &appmessage.MempoolEntry{
-			Fee:         transaction.Fee,
-			Transaction: rpcTransaction,
-		})
-	}
+func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage)
+
+	entries := make([]*appmessage.MempoolEntry, 0)
+
+	transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)
+
+	if !getMempoolEntriesRequest.FilterTransactionPool {
+		for _, transaction := range transactionPoolTransactions {
+			rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+			err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, &appmessage.MempoolEntry{
+				Fee:         transaction.Fee,
+				Transaction: rpcTransaction,
+				IsOrphan:    false,
+			})
+		}
+	}
+	if getMempoolEntriesRequest.IncludeOrphanPool {
+		for _, transaction := range orphanPoolTransactions {
+			rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+			err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, &appmessage.MempoolEntry{
+				Fee:         transaction.Fee,
+				Transaction: rpcTransaction,
+				IsOrphan:    true,
+			})
+		}
+	}

	return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
app/rpc/rpchandlers/get_mempool_entries_by_addresses.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"

	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
)

// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {

	getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)

	mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)

	sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
	if err != nil {
		return nil, err
	}

	for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {

		address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
		if err != nil {
			errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
			errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
			return errorMessage, nil
		}

		sending := make([]*appmessage.MempoolEntry, 0)
		receiving := make([]*appmessage.MempoolEntry, 0)

		scriptPublicKey, err := txscript.PayToAddrScript(address)
		if err != nil {
			errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
			errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
			return errorMessage, nil
		}

		if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {

			if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
				rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
				err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
				if err != nil {
					return nil, err
				}

				sending = append(sending, &appmessage.MempoolEntry{
					Fee:         transaction.Fee,
					Transaction: rpcTransaction,
					IsOrphan:    false,
				},
				)
			}

			if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
				rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
				err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
				if err != nil {
					return nil, err
				}

				receiving = append(receiving, &appmessage.MempoolEntry{
					Fee:         transaction.Fee,
					Transaction: rpcTransaction,
					IsOrphan:    false,
				},
				)
			}
		}
		if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {

			if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
				rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
				err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
				if err != nil {
					return nil, err
				}

				sending = append(sending, &appmessage.MempoolEntry{
					Fee:         transaction.Fee,
					Transaction: rpcTransaction,
					IsOrphan:    true,
				},
				)
			}

			if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
				rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
				err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
				if err != nil {
					return nil, err
				}

				receiving = append(receiving, &appmessage.MempoolEntry{
					Fee:         transaction.Fee,
					Transaction: rpcTransaction,
					IsOrphan:    true,
				},
				)
			}

		}

		if len(sending) > 0 || len(receiving) > 0 {
			mempoolEntriesByAddresses = append(
				mempoolEntriesByAddresses,
				&appmessage.MempoolEntryByAddress{
					Address:   address.String(),
					Sending:   sending,
					Receiving: receiving,
				},
			)
		}
	}

	return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil
}
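The handler above buckets each mempool transaction as sending or receiving relative to an address by comparing script public keys: a transaction sends from a script when one of its inputs spends an output paying that script, and receives to it when one of its own outputs pays that script. A minimal sketch of that classification rule (illustrative types, not the mempool's):

package main

import "fmt"

type tx struct {
	inputScripts  []string // scripts of the outputs this transaction spends
	outputScripts []string // scripts this transaction pays to
}

// classify reports whether t is sending from and/or receiving to script.
func classify(t tx, script string) (sending, receiving bool) {
	for _, s := range t.inputScripts {
		if s == script {
			sending = true
		}
	}
	for _, s := range t.outputScripts {
		if s == script {
			receiving = true
		}
	}
	return sending, receiving
}

func main() {
	t := tx{inputScripts: []string{"scriptA"}, outputScripts: []string{"scriptB", "scriptA"}}
	fmt.Println(classify(t, "scriptA")) // true true: change returned to the sender
}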
@@ -3,12 +3,18 @@ package rpchandlers
import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+
+	transaction := &externalapi.DomainTransaction{}
+	var found bool
+	var isOrphan bool
+
	getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)

	transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)
@@ -18,17 +24,18 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
		return errorMessage, nil
	}

-	transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
-	if !ok {
+	mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool)
+
+	if !found {
		errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
		return errorMessage, nil
	}
-	rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
+
+	rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
	err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
	if err != nil {
		return nil, err
	}

-	return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
+	return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
}
@@ -26,12 +26,14 @@ func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _
		return response, nil
	}

-	chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(virtualSelectedParentChain)
+	chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
+		virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs)
	if err != nil {
		return nil, err
	}

	response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
-		chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes)
+		chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes,
+		chainChangedNotification.AcceptedTransactionIDs)
	return response, nil
}
app/rpc/rpchandlers/notify_new_block_template.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
	listener.PropagateNewBlockTemplateNotifications()

	response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
	return response, nil
}
@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
	if err != nil {
		return nil, err
	}
-	listener.PropagateUTXOsChangedNotifications(addresses)
+	context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)

	response := appmessage.NewNotifyUTXOsChangedResponseMessage()
	return response, nil
@@ -7,12 +7,17 @@ import (
)

// HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command
-func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router,
+	request appmessage.Message) (appmessage.Message, error) {
+
+	notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage)
+
	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
-	listener.PropagateVirtualSelectedParentChainChangedNotifications()
+	listener.PropagateVirtualSelectedParentChainChangedNotifications(
+		notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs)

	response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage()
	return response, nil
@@ -8,6 +8,14 @@ import (

// HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
+		response := &appmessage.ResolveFinalityConflictResponseMessage{}
+		response.Error =
+			appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
	response := &appmessage.ResolveFinalityConflictResponseMessage{}
	response.Error = appmessage.RPCErrorf("not implemented")
	return response, nil
@@ -12,6 +12,14 @@ const pauseBeforeShutDown = time.Second

// HandleShutDown handles the respectively named RPC command
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
+		response := appmessage.NewShutDownResponseMessage()
+		response.Error =
+			appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
	log.Warn("ShutDown RPC called.")

	// Wait a second before shutting down, to allow time to return the response to the caller
@@ -26,7 +26,7 @@ func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router
	if err != nil {
		return nil, err
	}
-	listener.StopPropagatingUTXOsChangedNotifications(addresses)
+	context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses)

	response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
	return response, nil
@@ -1,6 +1,7 @@
package rpchandlers

import (
+	"encoding/json"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
@@ -14,9 +15,14 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)

-	isSynced, err := context.ProtocolManager.ShouldMine()
-	if err != nil {
-		return nil, err
+	var err error
+	isSynced := false
+	// The node is considered synced if it has peers and consensus state is nearly synced
+	if context.ProtocolManager.Context().HasPeers() {
+		isSynced, err = context.ProtocolManager.Context().IsNearlySynced()
+		if err != nil {
+			return nil, err
+		}
	}

	if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
@@ -58,6 +64,12 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
		return nil, err
	}

+	jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ")
+	if jsonBytes != nil {
+		log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+
+			"the full header for debug purposes: \n%s", err, string(jsonBytes))
+	}
+
	return &appmessage.SubmitBlockResponseMessage{
		Error:        appmessage.RPCErrorf("Block rejected. Reason: %s", err),
		RejectReason: appmessage.RejectReasonBlockInvalid,
@@ -28,7 +28,8 @@ func HandleSubmitTransaction(context *rpccontext.Context, _ *router.Router, requ
	}

	log.Debugf("Rejected transaction %s: %s", transactionID, err)
-	errorMessage := &appmessage.SubmitTransactionResponseMessage{}
+	// Return the ID also in the case of error, so that clients can match the response to the correct transaction submit request
+	errorMessage := appmessage.NewSubmitTransactionResponseMessage(transactionID.String())
	errorMessage.Error = appmessage.RPCErrorf("Rejected transaction %s: %s", transactionID, err)
	return errorMessage, nil
}
@@ -9,6 +9,14 @@ import (

// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
+		response := appmessage.NewUnbanResponseMessage()
+		response.Error =
+			appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
	unbanRequest := request.(*appmessage.UnbanRequestMessage)
	ip := net.ParseIP(unbanRequest.IP)
	if ip == nil {
@@ -5,14 +5,10 @@ FLAGS=$@
go version

go get $FLAGS -t -d ./...
-# This is to bypass a go bug: https://github.com/golang/go/issues/27643
-GO111MODULE=off go get $FLAGS golang.org/x/lint/golint \
-	honnef.co/go/tools/cmd/staticcheck
+go install $FLAGS honnef.co/go/tools/cmd/staticcheck@latest

test -z "$(go fmt ./...)"

-golint -set_exit_status ./...
-
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...

go build $FLAGS -o kaspad .
changelog.txt
@@ -1,3 +1,247 @@
Kaspad v0.12.17 - 2024-02-19
===========================

* Wallet-related improvements and fixes (#2253, #2257, #2258, #2262)

Kaspad v0.12.16 - 2023-12-25
===========================

* Adapt wallet UTXO selection to dust patch (#2254)

Kaspad v0.12.15 - 2023-12-16
===========================

* Update ECDSA address test to use a valid public key (#2202)
* Fix off-by-small-amounts in sent kaspa amounts (#2220)
* Use removeRedeemers correctly (#2235)
* Fix windows asset building by increasing go version (#2245)
* Added a mainnet dnsseeder (#2247)
* Fix extract atomic swap data pushes (#2203)
* Adapt kaspawallet to support testnet 11 (#2211)
* Fix type detection in RemoveInvalidTransactions (#2252)

Kaspad v0.12.14 - 2023-09-26
===========================

* Anti-spam measures against dust attack (#2223)

Kaspad v0.12.13 - 2023-03-06
===========================

* Bump golang.org/x/crypto from 0.0.0-20210513164829-c07d793c2f9a to 0.1.0 (#2195)
* Bump golang.org/x/net from 0.0.0-20210405180319-a5a99cb37ef4 to 0.7.0 (#2194)
* Avoid sending transactions with no funds (#2193)

Kaspad v0.12.12 - 2023-03-06
===========================

* Rename last references to blockheight (#2089)
* Add code of conduct (#2183)
* Extend TestGetPreciseSigOps with more tests (#2188)
* Add Dockerfile to kaspawallet (#2187)
* Add `--send-all` to `kaspawallet send` command (#2181)
* Bump golang.org/x/text from 0.3.5 to 0.3.8 (#2190)
* Upgrade to go 1.19 (#2191)

Kaspad v0.12.11 - 2022-12-01
===========================

* Fix IBD sync conditions (#2174)

Kaspad v0.12.10 - 2022-11-23
===========================

* Increase devnet's initial difficulty (#2167)

Bug fixes:
* Check rule errors when validating blocks with trusted data (#2171)
* Compare blue score with selected tip when checking if a pruning point proof is needed (#2169)
* Add found to GetBlock (#2165)

Wallet new features:
* Use one of the From addresses as a change address (#2164)
Kaspad v0.12.9 - 2022-10-23
===========================

* Create directory before locking lock file (#2160)

Kaspad v0.12.8 - 2022-10-23
===========================

* Remove hard fork activation rules (#2152)
* Add lock file to kaspawallet (#2154)
* Add a new testnet DNS seeder (#2156)
* Use utxo diff algo for pruning point move and use acceptance data method only as a fall-back (#2157)
* Make more checks if status is invalid even if the block exists (#2158)

Kaspad v0.12.7 - 2022-09-21
===========================

* Security Fix + Hard fork - Full details can be seen here: https://medium.com/@michaelsuttonil/kaspa-security-patch-and-hard-fork-september-2022-12da617b0094

Kaspad v0.12.6 - 2022-09-09
===========================

* Remove tests from docker files (#2133)

Wallet new features:
* Optionally show serialized transactions on send (#2135)

Bug fixes:
* Update virtual on IBD if nearly synced (#2134)

Kaspad v0.12.5 - 2022-08-28
===========================

* Add tests for hash writers (#2120)
* Replace daglabs's dnsseeder with Wolfie's (#2119)
* Change testnet dnsseeder (#2126)
* Add RPC timeout parameter to wallet daemon (#2104)

Wallet new features:
* Add UseExistingChangeAddress option to the wallet (#2127)

Bug fixes:
* Call update pruning point if required on resolve virtual or startup (#2129)
* Add missing locks to notification listener modifications (#2124)
* Calculate pruning point utxo set from acceptance data (#2123)
* Fix RPC client memory/goroutine leak (#2122)
* Fix a subtle lock sync issue in consensus insert block (#2121)
* Mempool: Retrieve stable state of the mempool. Optimize get mempool entries by addresses (#2111)
* Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout (#2131)

Kaspad v0.12.4 - 2022-07-17
===========================

* Crucial fix for the UTXO difference mechanism (#2114)
* Implement multi-layer auto-compound (#2115)

Kaspad v0.12.3 - 2022-06-29
===========================

* Fixes a few bugs which can lead to node crashes or out-of-memory errors

Kaspad v0.12.2 - 2022-06-17
===========================

* Clarify wallet message concerning a wallet daemon sync state (#2045)
* Change the way the miner executable reports execution errors (closes issue #1677) (#2048)
* Fix kaspawallet help messages, clarify sweep command help string (#2067)
* Wallet parse/send/create commands improvement (#2024)
* Use chunks for `GetBlocksAcceptanceData` calls in order to avoid blocking consensus for too long (#2075)
* Unite multiple `GetBlockAcceptanceData` consensus calls to one (#2074)
* Update many-small-chains-and-one-big-chain DAG to not fail merge depth limit (#2072)

RPC API Changes:
* RPC: include orphans into mempool entries (#2046)
* RPC & UtxoIndex: keep track of, query and test circulating supply (#2070)

Bug Fixes:
* Fix RPC connections counting (#2026)
* Fix UTXO diff child error (#2084)
* Fix `not in selected chain` crash (#2082)

Kaspad v0.12.1 - 2022-05-31
===========================

* Fix utxoindex synchronization bug which resulted in kaspawallet orphan tx errors (#2052, #2056, #2059)
* Add a channel mechanism for consensus events to be processed in the order they were produced (#2052, #2056, #2059)
* Block template cache improvement (#2023)
* Improved staging shard performance (#2034)
* Add finality check to ResolveVirtual (#2041)
* Update Dockerfile for go 1.18 (#2038)
* Remove HF1 activation code (#2042)

Kaspa wallet:
* Various kaspawallet text fixes and log additions (#2032, #2047, #2062)
* Wallet address synchronization improvement (#2025)
* Add support for `from` address in `kaspawallet send` (#1964)
* Make kaspawallet ignore outputs that exist in the mempool (#2053)
* Wrap the entire wallet send operation with a lock (#2063)

RPC API:
* Add "GetMempoolEntriesByAddresses" to kaspad RPC (#2022)
* Make sure RPCErrors are returned and do not crash the system (#2039)
* Add AcceptedTransactionIDs to ChainChanged notification and VirtualSelectedParentChain RPC (#2036, for exchanges to track tx confirmations)
* Allow blank address in NotifyUTXOsChanged to get all updates (#2027)
* Include isSynced and isUtxoIndexed in GetInfoResponse (#2068)
Kaspad v0.12.0 - 2022-04-14
===========================
Breaking changes:
Hard-fork at DAA score 14687583 (estimated to be on 28/04 16:38 UTC) which includes:
* Using a separate depth from the finality depth for merge set calculations (#2013)
* Not counting the header size as part of the block mass (#2013)
* Increasing block version to 1 (#2013)
* Removing the limit on the amount of KAS that can be sent in one transaction (#2013)

Bug fixes:
* Making a workaround for the UTXO diff child bug (#2020)
* Use cosigner index 0 for read only wallets (#2014)

Non-breaking changes:
* Adding a "sweep" command to `kaspawallet` (#2018)
* Use `blue work` heuristic to skip irrelevant relay blocks
* Kaspawallet daemon: Add Send and Sign commands (#2016)

Kaspad v0.11.17 - 2022-04-06
===========================
* Decrement estimatedHeaderUpperBound from mempool's MaxBlockMass (#2009)

Kaspad v0.11.16 - 2022-04-05
===========================
* Don't skip wallet address with different cosigner index (#2007)

Kaspad v0.11.15 - 2022-04-05
===========================
* Add support for auto-compound in `kaspawallet send` (#1951)
* Unite reachability stores (#1963, #1993, #2001)
* Add names to nameless routes (#1986)
* Optimize the miner-kaspad flow and latency (#1988)
* Upgrade to go 1.18 (#1992)
* Add package name to kaspawalletd .proto file (#1991)
* Block template cache (#1994)
* Add extra data to GetBlockTemplate request (#1995, #1997)
* New definition for "out of sync" (#1996)
* Remove v4 p2p version (#1998)
* Remove increase pagefile from deploy.yaml (#2000)
* Cache the pruning point anticone (#2002)
* Add DB compaction after the deletion of a DB prefix (#2003)
* Fixed a bug in staging of pruning point by index (#2005)
* Clean up debug log level by moving many frequent logs to trace level (#2004)

Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)

Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Clean up log output mistakes and try to be clearer to the user (#1976, #1978)
* Apply avoiding-IBD logic from patch10 to p2p v4 IBD handling (#1979)

Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)
Some files were not shown because too many files have changed in this diff.