mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-09-14 13:30:11 +00:00
Compare commits
191 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
4bb5bf25d3 | ||
![]() |
25c2dd8670 | ||
![]() |
c93100ccd0 | ||
![]() |
03cc7dfc19 | ||
![]() |
ed745a9acb | ||
![]() |
c23c1d141c | ||
![]() |
352d261fd6 | ||
![]() |
43b9523919 | ||
![]() |
6085d1fc84 | ||
![]() |
1e9ddc42d0 | ||
![]() |
48a142e12f | ||
![]() |
86b89065cf | ||
![]() |
f41dc7fa0b | ||
![]() |
6b38bf7069 | ||
![]() |
d2453f8e7b | ||
![]() |
629faa8436 | ||
![]() |
91e6c6b74b | ||
![]() |
0819244ba1 | ||
![]() |
a0149cd8d0 | ||
![]() |
5a3b8a0066 | ||
![]() |
8e71f79f98 | ||
![]() |
346341a709 | ||
![]() |
8c881aea39 | ||
![]() |
40ec440dcf | ||
![]() |
88bdcb43bc | ||
![]() |
9d1e44673f | ||
![]() |
387fade044 | ||
![]() |
c417c8b525 | ||
![]() |
bd1420220a | ||
![]() |
5640ec4020 | ||
![]() |
1c0887ca60 | ||
![]() |
7be3f41aa7 | ||
![]() |
26c4c73624 | ||
![]() |
880d917e58 | ||
![]() |
3c53c6d8cd | ||
![]() |
3c4b973090 | ||
![]() |
8aee8f81c5 | ||
![]() |
ec3441e63f | ||
![]() |
e3ba1ca07e | ||
![]() |
27fdbd9c88 | ||
![]() |
377d9aaaeb | ||
![]() |
beee947dda | ||
![]() |
d4a27bf1c1 | ||
![]() |
eec6eb9669 | ||
![]() |
d5c10832c2 | ||
![]() |
9fbfba17b6 | ||
![]() |
09d698dd0e | ||
![]() |
ec51c6926a | ||
![]() |
7d44275eb1 | ||
![]() |
a3387a56b3 | ||
![]() |
c2ae03fc89 | ||
![]() |
6c774c966b | ||
![]() |
2d54c9693b | ||
![]() |
d8350d62b0 | ||
![]() |
26c7db251f | ||
![]() |
4d435f2b3a | ||
![]() |
067688f549 | ||
![]() |
3a3fa0d3f0 | ||
![]() |
cf4073b773 | ||
![]() |
6a5e7c9e3f | ||
![]() |
7e9b5b9010 | ||
![]() |
953838e0d8 | ||
![]() |
a1dcb34c29 | ||
![]() |
23764e1b0b | ||
![]() |
0838cc8e32 | ||
![]() |
9f51330f38 | ||
![]() |
f6d46fd23f | ||
![]() |
2a7e03e232 | ||
![]() |
3286a7d010 | ||
![]() |
aabbc741d7 | ||
![]() |
20b7ab89f9 | ||
![]() |
10f1e7e3f4 | ||
![]() |
d941c73701 | ||
![]() |
3f80638c86 | ||
![]() |
266ec6c270 | ||
![]() |
9ee409afaa | ||
![]() |
715cb3b1ac | ||
![]() |
eb693c4a86 | ||
![]() |
7a61c637b0 | ||
![]() |
c7bd84ef9d | ||
![]() |
b26b9f6c4b | ||
![]() |
1c9bb54cc2 | ||
![]() |
b9093d59eb | ||
![]() |
18d000f625 | ||
![]() |
c5aade7e7f | ||
![]() |
d4b741fd7c | ||
![]() |
74a4f927e9 | ||
![]() |
847aafc91f | ||
![]() |
c87e541570 | ||
![]() |
2ea1c4f922 | ||
![]() |
5e9c28b77b | ||
![]() |
d957a6d93a | ||
![]() |
b2648aa5bd | ||
![]() |
3908f274ae | ||
![]() |
fa7ea121ff | ||
![]() |
24848da895 | ||
![]() |
b200b77541 | ||
![]() |
d50ad0667c | ||
![]() |
5cea285960 | ||
![]() |
7eb5085f6b | ||
![]() |
491e3569d2 | ||
![]() |
440aea19b0 | ||
![]() |
968d47c3e6 | ||
![]() |
052193865e | ||
![]() |
85febcb551 | ||
![]() |
a4d9fa10bf | ||
![]() |
cd5fd86ad3 | ||
![]() |
b84d6fed2c | ||
![]() |
24c94b38be | ||
![]() |
4dd7113dc5 | ||
![]() |
48c7fa0104 | ||
![]() |
4d0cf2169a | ||
![]() |
5f7cc079e9 | ||
![]() |
016ddfdfce | ||
![]() |
5d24e2afbc | ||
![]() |
8735da045f | ||
![]() |
c839337425 | ||
![]() |
7390651072 | ||
![]() |
52fbeedf20 | ||
![]() |
1660cf0cf1 | ||
![]() |
2b5202be7a | ||
![]() |
9ffbb15160 | ||
![]() |
540b0d3a22 | ||
![]() |
8d5faee53a | ||
![]() |
6e2fd0633b | ||
![]() |
beb038c815 | ||
![]() |
35a959b56f | ||
![]() |
57c6118be8 | ||
![]() |
723aebbec9 | ||
![]() |
2b395e34b1 | ||
![]() |
ada559f007 | ||
![]() |
357e8ce73c | ||
![]() |
6725902663 | ||
![]() |
99bb21c512 | ||
![]() |
a4669f3fb5 | ||
![]() |
e8f40bdff9 | ||
![]() |
68a407ea37 | ||
![]() |
80879cabe1 | ||
![]() |
71afc62298 | ||
![]() |
ca5c8549b9 | ||
![]() |
ab73def07a | ||
![]() |
3f840233d8 | ||
![]() |
90d9edb8e5 | ||
![]() |
b9b360bce4 | ||
![]() |
27654961f9 | ||
![]() |
d45af760d8 | ||
![]() |
95fa045297 | ||
![]() |
cb65dae63d | ||
![]() |
21b82d7efc | ||
![]() |
63c6d7443b | ||
![]() |
753f4a2ec1 | ||
![]() |
ed667f7e54 | ||
![]() |
c4a034eb43 | ||
![]() |
2eca0f0b5f | ||
![]() |
58d627e05a | ||
![]() |
639183ba0e | ||
![]() |
9fa08442cf | ||
![]() |
0dd50394ec | ||
![]() |
ac8d4e1341 | ||
![]() |
2488fbde78 | ||
![]() |
2ab8065142 | ||
![]() |
25410b86ae | ||
![]() |
4e44dd8510 | ||
![]() |
1e56a22b32 | ||
![]() |
7a95f0c7a4 | ||
![]() |
c81506220b | ||
![]() |
e5598c15a7 | ||
![]() |
433af5e0fe | ||
![]() |
b7be807167 | ||
![]() |
e687ceeae7 | ||
![]() |
04e35321aa | ||
![]() |
061e65be93 | ||
![]() |
190e725dd0 | ||
![]() |
6449b03034 | ||
![]() |
9f02a24e8b | ||
![]() |
9b23bbcdb5 | ||
![]() |
b30f7309a2 | ||
![]() |
1c18a49992 | ||
![]() |
28d0f1ea2e | ||
![]() |
3f7e482291 | ||
![]() |
ce4f5fcc33 | ||
![]() |
be3a6604d7 | ||
![]() |
f452531df0 | ||
![]() |
13a09da848 | ||
![]() |
f58aeb4f9f | ||
![]() |
82f0a4d74f | ||
![]() |
69d90fe827 | ||
![]() |
c85b5d70fd | ||
![]() |
1cd712a63e | ||
![]() |
27ba9d0374 | ||
![]() |
b1229f7908 |
23
.github/workflows/deploy.yaml
vendored
23
.github/workflows/deploy.yaml
vendored
@ -1,7 +1,7 @@
|
|||||||
name: Build and upload assets
|
name: Build and upload assets
|
||||||
on:
|
on:
|
||||||
release:
|
release:
|
||||||
types: [ published ]
|
types: [published]
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
@ -9,7 +9,7 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest, windows-latest, macos-latest ]
|
os: [ubuntu-latest, windows-latest, macos-latest]
|
||||||
name: Building, ${{ matrix.os }}
|
name: Building, ${{ matrix.os }}
|
||||||
steps:
|
steps:
|
||||||
- name: Fix CRLF on Windows
|
- name: Fix CRLF on Windows
|
||||||
@ -17,18 +17,12 @@ jobs:
|
|||||||
run: git config --global core.autocrlf false
|
run: git config --global core.autocrlf false
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# Increase the pagefile size on Windows to aviod running out of memory
|
|
||||||
- name: Increase pagefile size on Windows
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
|
||||||
|
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.21
|
||||||
|
|
||||||
- name: Build on Linux
|
- name: Build on Linux
|
||||||
if: runner.os == 'Linux'
|
if: runner.os == 'Linux'
|
||||||
@ -36,7 +30,7 @@ jobs:
|
|||||||
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
|
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
|
||||||
# `-s -w` strips the binary to produce smaller size binaries
|
# `-s -w` strips the binary to produce smaller size binaries
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
|
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||||
zip -r "${archive}" ./bin/*
|
zip -r "${archive}" ./bin/*
|
||||||
@ -47,7 +41,7 @@ jobs:
|
|||||||
if: runner.os == 'Windows'
|
if: runner.os == 'Windows'
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
|
go build -v -ldflags="-s -w" -o bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||||
powershell "Compress-Archive bin/* \"${archive}\""
|
powershell "Compress-Archive bin/* \"${archive}\""
|
||||||
@ -57,14 +51,13 @@ jobs:
|
|||||||
- name: Build on MacOS
|
- name: Build on MacOS
|
||||||
if: runner.os == 'macOS'
|
if: runner.os == 'macOS'
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
|
go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||||
zip -r "${archive}" ./bin/*
|
zip -r "${archive}" ./bin/*
|
||||||
echo "archive=${archive}" >> $GITHUB_ENV
|
echo "archive=${archive}" >> $GITHUB_ENV
|
||||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
|
||||||
- name: Upload release asset
|
- name: Upload release asset
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
env:
|
env:
|
||||||
|
8
.github/workflows/race.yaml
vendored
8
.github/workflows/race.yaml
vendored
@ -11,18 +11,18 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
branch: [ master, latest ]
|
branch: [master, latest]
|
||||||
name: Race detection on ${{ matrix.branch }}
|
name: Race detection on ${{ matrix.branch }}
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.23
|
||||||
|
|
||||||
- name: Set scheduled branch name
|
- name: Set scheduled branch name
|
||||||
shell: bash
|
shell: bash
|
||||||
|
28
.github/workflows/tests.yaml
vendored
28
.github/workflows/tests.yaml
vendored
@ -8,22 +8,20 @@ on:
|
|||||||
types: [opened, synchronize, edited]
|
types: [opened, synchronize, edited]
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
build:
|
build:
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest, macos-latest ]
|
os: [ubuntu-latest, macos-latest]
|
||||||
name: Tests, ${{ matrix.os }}
|
name: Tests, ${{ matrix.os }}
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Fix CRLF on Windows
|
- name: Fix CRLF on Windows
|
||||||
if: runner.os == 'Windows'
|
if: runner.os == 'Windows'
|
||||||
run: git config --global core.autocrlf false
|
run: git config --global core.autocrlf false
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# Increase the pagefile size on Windows to aviod running out of memory
|
# Increase the pagefile size on Windows to aviod running out of memory
|
||||||
- name: Increase pagefile size on Windows
|
- name: Increase pagefile size on Windows
|
||||||
@ -31,14 +29,13 @@ jobs:
|
|||||||
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.23
|
||||||
|
|
||||||
|
|
||||||
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
||||||
- name: Go Cache
|
- name: Go Cache
|
||||||
uses: actions/cache@v2
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
path: ~/go/pkg/mod
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
@ -49,19 +46,17 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: ./build_and_test.sh -v
|
run: ./build_and_test.sh -v
|
||||||
|
|
||||||
|
|
||||||
stability-test-fast:
|
stability-test-fast:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Fast stability tests, ${{ github.head_ref }}
|
name: Fast stability tests, ${{ github.head_ref }}
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.23
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@ -75,18 +70,17 @@ jobs:
|
|||||||
working-directory: stability-tests
|
working-directory: stability-tests
|
||||||
run: ./install_and_test.sh
|
run: ./install_and_test.sh
|
||||||
|
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Produce code coverage
|
name: Produce code coverage
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.23
|
||||||
|
|
||||||
- name: Delete the stability tests from coverage
|
- name: Delete the stability tests from coverage
|
||||||
run: rm -r stability-tests
|
run: rm -r stability-tests
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -53,6 +53,7 @@ _testmain.go
|
|||||||
debug
|
debug
|
||||||
debug.test
|
debug.test
|
||||||
__debug_bin
|
__debug_bin
|
||||||
|
*__debug_*
|
||||||
|
|
||||||
# CI
|
# CI
|
||||||
version.txt
|
version.txt
|
||||||
|
43
CODE_OF_CONDUCT.md
Normal file
43
CODE_OF_CONDUCT.md
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
16
README.md
16
README.md
@ -1,13 +1,15 @@
|
|||||||
|
# DEPRECATED
|
||||||
|
|
||||||
Kaspad
|
The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
|
||||||
====
|
|
||||||
|
PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
|
||||||
|
|
||||||
|
# Kaspad
|
||||||
|
|
||||||
[](https://choosealicense.com/licenses/isc/)
|
[](https://choosealicense.com/licenses/isc/)
|
||||||
[](http://godoc.org/github.com/kaspanet/kaspad)
|
[](http://godoc.org/github.com/kaspanet/kaspad)
|
||||||
|
|
||||||
Kaspad is the reference full node Kaspa implementation written in Go (golang).
|
Kaspad was the reference full node Kaspa implementation written in Go (golang).
|
||||||
|
|
||||||
This project is currently under active development and is in Beta state.
|
|
||||||
|
|
||||||
## What is kaspa
|
## What is kaspa
|
||||||
|
|
||||||
@ -15,7 +17,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
Go 1.16 or later.
|
Go 1.23 or later.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
@ -42,7 +44,6 @@ $ go install . ./cmd/...
|
|||||||
not already add the bin directory to your system path during Go installation,
|
not already add the bin directory to your system path during Go installation,
|
||||||
you are encouraged to do so now.
|
you are encouraged to do so now.
|
||||||
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
Kaspad has several configuration options available to tweak how it runs, but all
|
Kaspad has several configuration options available to tweak how it runs, but all
|
||||||
@ -53,6 +54,7 @@ $ kaspad
|
|||||||
```
|
```
|
||||||
|
|
||||||
## Discord
|
## Discord
|
||||||
|
|
||||||
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
|
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
|
||||||
|
|
||||||
## Issue Tracker
|
## Issue Tracker
|
||||||
|
@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
|
|||||||
with the specifics of message handling such as what to do when a message is
|
with the specifics of message handling such as what to do when a message is
|
||||||
received. This provides the caller with a high level of flexibility.
|
received. This provides the caller with a high level of flexibility.
|
||||||
|
|
||||||
Kaspa Message Overview
|
# Kaspa Message Overview
|
||||||
|
|
||||||
The kaspa protocol consists of exchanging messages between peers. Each
|
The kaspa protocol consists of exchanging messages between peers. Each
|
||||||
message is preceded by a header which identifies information about it such as
|
message is preceded by a header which identifies information about it such as
|
||||||
@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
|
|||||||
appmessage using kaspa encoding are handled so the caller doesn't have to concern
|
appmessage using kaspa encoding are handled so the caller doesn't have to concern
|
||||||
themselves with the specifics.
|
themselves with the specifics.
|
||||||
|
|
||||||
Message Interaction
|
# Message Interaction
|
||||||
|
|
||||||
The following provides a quick summary of how the kaspa messages are intended
|
The following provides a quick summary of how the kaspa messages are intended
|
||||||
to interact with one another. As stated above, these interactions are not
|
to interact with one another. As stated above, these interactions are not
|
||||||
@ -45,13 +45,13 @@ interactions in no particular order.
|
|||||||
notfound message (MsgNotFound)
|
notfound message (MsgNotFound)
|
||||||
ping message (MsgPing) pong message (MsgPong)
|
ping message (MsgPing) pong message (MsgPong)
|
||||||
|
|
||||||
Common Parameters
|
# Common Parameters
|
||||||
|
|
||||||
There are several common parameters that arise when using this package to read
|
There are several common parameters that arise when using this package to read
|
||||||
and write kaspa messages. The following sections provide a quick overview of
|
and write kaspa messages. The following sections provide a quick overview of
|
||||||
these parameters so the next sections can build on them.
|
these parameters so the next sections can build on them.
|
||||||
|
|
||||||
Protocol Version
|
# Protocol Version
|
||||||
|
|
||||||
The protocol version should be negotiated with the remote peer at a higher
|
The protocol version should be negotiated with the remote peer at a higher
|
||||||
level than this package via the version (MsgVersion) message exchange, however,
|
level than this package via the version (MsgVersion) message exchange, however,
|
||||||
@ -60,7 +60,7 @@ latest protocol version this package supports and is typically the value to use
|
|||||||
for all outbound connections before a potentially lower protocol version is
|
for all outbound connections before a potentially lower protocol version is
|
||||||
negotiated.
|
negotiated.
|
||||||
|
|
||||||
Kaspa Network
|
# Kaspa Network
|
||||||
|
|
||||||
The kaspa network is a magic number which is used to identify the start of a
|
The kaspa network is a magic number which is used to identify the start of a
|
||||||
message and which kaspa network the message applies to. This package provides
|
message and which kaspa network the message applies to. This package provides
|
||||||
@ -71,7 +71,7 @@ the following constants:
|
|||||||
appmessage.Simnet (Simulation test network)
|
appmessage.Simnet (Simulation test network)
|
||||||
appmessage.Devnet (Development network)
|
appmessage.Devnet (Development network)
|
||||||
|
|
||||||
Determining Message Type
|
# Determining Message Type
|
||||||
|
|
||||||
As discussed in the kaspa message overview section, this package reads
|
As discussed in the kaspa message overview section, this package reads
|
||||||
and writes kaspa messages using a generic interface named Message. In
|
and writes kaspa messages using a generic interface named Message. In
|
||||||
@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
|
|||||||
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
|
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
Reading Messages
|
# Reading Messages
|
||||||
|
|
||||||
In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
|
In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
|
||||||
function. It accepts any io.Reader, but typically this will be a net.Conn to
|
function. It accepts any io.Reader, but typically this will be a net.Conn to
|
||||||
@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
|
|||||||
// Log and handle the error
|
// Log and handle the error
|
||||||
}
|
}
|
||||||
|
|
||||||
Writing Messages
|
# Writing Messages
|
||||||
|
|
||||||
In order to marshall kaspa messages to the appmessage, use the WriteMessage
|
In order to marshall kaspa messages to the appmessage, use the WriteMessage
|
||||||
function. It accepts any io.Writer, but typically this will be a net.Conn to
|
function. It accepts any io.Writer, but typically this will be a net.Conn to
|
||||||
@ -122,7 +122,7 @@ from a remote peer is:
|
|||||||
// Log and handle the error
|
// Log and handle the error
|
||||||
}
|
}
|
||||||
|
|
||||||
Errors
|
# Errors
|
||||||
|
|
||||||
Errors returned by this package are either the raw errors provided by underlying
|
Errors returned by this package are either the raw errors provided by underlying
|
||||||
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
|
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
|
||||||
|
@ -2,9 +2,10 @@ package appmessage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||||
@ -218,7 +219,8 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
|||||||
Outputs: outputs,
|
Outputs: outputs,
|
||||||
LockTime: rpcTransaction.LockTime,
|
LockTime: rpcTransaction.LockTime,
|
||||||
SubnetworkID: *subnetworkID,
|
SubnetworkID: *subnetworkID,
|
||||||
Gas: rpcTransaction.LockTime,
|
Gas: rpcTransaction.Gas,
|
||||||
|
MassCommitment: rpcTransaction.Mass,
|
||||||
Payload: payload,
|
Payload: payload,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@ -286,7 +288,8 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
|||||||
Outputs: outputs,
|
Outputs: outputs,
|
||||||
LockTime: transaction.LockTime,
|
LockTime: transaction.LockTime,
|
||||||
SubnetworkID: subnetworkID,
|
SubnetworkID: subnetworkID,
|
||||||
Gas: transaction.LockTime,
|
Gas: transaction.Gas,
|
||||||
|
Mass: transaction.MassCommitment,
|
||||||
Payload: payload,
|
Payload: payload,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -38,6 +38,10 @@ type RPCError struct {
|
|||||||
Message string
|
Message string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (err RPCError) Error() string {
|
||||||
|
return err.Message
|
||||||
|
}
|
||||||
|
|
||||||
// RPCErrorf formats according to a format specifier and returns the string
|
// RPCErrorf formats according to a format specifier and returns the string
|
||||||
// as an RPCError.
|
// as an RPCError.
|
||||||
func RPCErrorf(format string, args ...interface{}) *RPCError {
|
func RPCErrorf(format string, args ...interface{}) *RPCError {
|
||||||
|
@ -69,6 +69,10 @@ const (
|
|||||||
CmdReady
|
CmdReady
|
||||||
CmdTrustedData
|
CmdTrustedData
|
||||||
CmdBlockWithTrustedDataV4
|
CmdBlockWithTrustedDataV4
|
||||||
|
CmdRequestNextPruningPointAndItsAnticoneBlocks
|
||||||
|
CmdRequestIBDChainBlockLocator
|
||||||
|
CmdIBDChainBlockLocator
|
||||||
|
CmdRequestAnticone
|
||||||
|
|
||||||
// rpc
|
// rpc
|
||||||
CmdGetCurrentNetworkRequestMessage
|
CmdGetCurrentNetworkRequestMessage
|
||||||
@ -150,6 +154,19 @@ const (
|
|||||||
CmdNotifyVirtualDaaScoreChangedRequestMessage
|
CmdNotifyVirtualDaaScoreChangedRequestMessage
|
||||||
CmdNotifyVirtualDaaScoreChangedResponseMessage
|
CmdNotifyVirtualDaaScoreChangedResponseMessage
|
||||||
CmdVirtualDaaScoreChangedNotificationMessage
|
CmdVirtualDaaScoreChangedNotificationMessage
|
||||||
|
CmdGetBalancesByAddressesRequestMessage
|
||||||
|
CmdGetBalancesByAddressesResponseMessage
|
||||||
|
CmdNotifyNewBlockTemplateRequestMessage
|
||||||
|
CmdNotifyNewBlockTemplateResponseMessage
|
||||||
|
CmdNewBlockTemplateNotificationMessage
|
||||||
|
CmdGetMempoolEntriesByAddressesRequestMessage
|
||||||
|
CmdGetMempoolEntriesByAddressesResponseMessage
|
||||||
|
CmdGetCoinSupplyRequestMessage
|
||||||
|
CmdGetCoinSupplyResponseMessage
|
||||||
|
CmdGetFeeEstimateRequestMessage
|
||||||
|
CmdGetFeeEstimateResponseMessage
|
||||||
|
CmdSubmitTransactionReplacementRequestMessage
|
||||||
|
CmdSubmitTransactionReplacementResponseMessage
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
||||||
@ -193,6 +210,10 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdReady: "Ready",
|
CmdReady: "Ready",
|
||||||
CmdTrustedData: "TrustedData",
|
CmdTrustedData: "TrustedData",
|
||||||
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
|
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
|
||||||
|
CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
|
||||||
|
CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator",
|
||||||
|
CmdIBDChainBlockLocator: "IBDChainBlockLocator",
|
||||||
|
CmdRequestAnticone: "RequestAnticone",
|
||||||
}
|
}
|
||||||
|
|
||||||
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
||||||
@ -274,6 +295,19 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
|
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
|
||||||
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
|
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
|
||||||
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
|
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
|
||||||
|
CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest",
|
||||||
|
CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
|
||||||
|
CmdNotifyNewBlockTemplateRequestMessage: "NotifyNewBlockTemplateRequest",
|
||||||
|
CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse",
|
||||||
|
CmdNewBlockTemplateNotificationMessage: "NewBlockTemplateNotification",
|
||||||
|
CmdGetMempoolEntriesByAddressesRequestMessage: "GetMempoolEntriesByAddressesRequest",
|
||||||
|
CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
|
||||||
|
CmdGetCoinSupplyRequestMessage: "GetCoinSupplyRequest",
|
||||||
|
CmdGetCoinSupplyResponseMessage: "GetCoinSupplyResponse",
|
||||||
|
CmdGetFeeEstimateRequestMessage: "GetFeeEstimateRequest",
|
||||||
|
CmdGetFeeEstimateResponseMessage: "GetFeeEstimateResponse",
|
||||||
|
CmdSubmitTransactionReplacementRequestMessage: "SubmitTransactionReplacementRequest",
|
||||||
|
CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Message is an interface that describes a kaspa message. A type that
|
// Message is an interface that describes a kaspa message. A type that
|
||||||
|
@ -132,7 +132,7 @@ func TestConvertToPartial(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//blockOne is the first block in the mainnet block DAG.
|
// blockOne is the first block in the mainnet block DAG.
|
||||||
var blockOne = MsgBlock{
|
var blockOne = MsgBlock{
|
||||||
Header: MsgBlockHeader{
|
Header: MsgBlockHeader{
|
||||||
Version: 0,
|
Version: 0,
|
||||||
|
27
app/appmessage/p2p_msgibdchainblocklocator.go
Normal file
27
app/appmessage/p2p_msgibdchainblocklocator.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
|
||||||
|
// locator message. It is used to find the blockLocator of a peer that is
|
||||||
|
// syncing with you.
|
||||||
|
type MsgIBDChainBlockLocator struct {
|
||||||
|
baseMessage
|
||||||
|
BlockLocatorHashes []*externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
|
||||||
|
return CmdIBDChainBlockLocator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
|
||||||
|
// the Message interface. See MsgBlockLocator for details.
|
||||||
|
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
|
||||||
|
return &MsgIBDChainBlockLocator{
|
||||||
|
BlockLocatorHashes: locatorHashes,
|
||||||
|
}
|
||||||
|
}
|
33
app/appmessage/p2p_msgrequestanticone.go
Normal file
33
app/appmessage/p2p_msgrequestanticone.go
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
// Copyright (c) 2013-2016 The btcsuite developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgRequestAnticone implements the Message interface and represents a kaspa
|
||||||
|
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
|
||||||
|
type MsgRequestAnticone struct {
|
||||||
|
baseMessage
|
||||||
|
BlockHash *externalapi.DomainHash
|
||||||
|
ContextHash *externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestAnticone) Command() MessageCommand {
|
||||||
|
return CmdRequestAnticone
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
|
||||||
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
|
// fields.
|
||||||
|
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
|
||||||
|
return &MsgRequestAnticone{
|
||||||
|
BlockHash: blockHash,
|
||||||
|
ContextHash: contextHash,
|
||||||
|
}
|
||||||
|
}
|
31
app/appmessage/p2p_msgrequestibdchainblocklocator.go
Normal file
31
app/appmessage/p2p_msgrequestibdchainblocklocator.go
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
|
||||||
|
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
|
||||||
|
// and high hash.
|
||||||
|
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
|
||||||
|
type MsgRequestIBDChainBlockLocator struct {
|
||||||
|
baseMessage
|
||||||
|
HighHash *externalapi.DomainHash
|
||||||
|
LowHash *externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
|
||||||
|
return CmdRequestIBDChainBlockLocator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
|
||||||
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
|
// fields.
|
||||||
|
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
|
||||||
|
return &MsgRequestIBDChainBlockLocator{
|
||||||
|
HighHash: highHash,
|
||||||
|
LowHash: lowHash,
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
|
||||||
|
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
|
||||||
|
// more blocks from the pruning anticone.
|
||||||
|
//
|
||||||
|
// This message has no payload.
|
||||||
|
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
|
||||||
|
return CmdRequestNextPruningPointAndItsAnticoneBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
|
||||||
|
// Message interface.
|
||||||
|
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
|
||||||
|
return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
|
||||||
|
}
|
@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
|
|||||||
|
|
||||||
// TestTxHash tests the ability to generate the hash of a transaction accurately.
|
// TestTxHash tests the ability to generate the hash of a transaction accurately.
|
||||||
func TestTxHashAndID(t *testing.T) {
|
func TestTxHashAndID(t *testing.T) {
|
||||||
txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
|
txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
|
||||||
txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
|
txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
|
||||||
wantTxID1, err := transactionid.FromString(txID1Str)
|
wantTxID1, err := transactionid.FromString(txID1Str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("NewTxIDFromStr: %v", err)
|
t.Fatalf("NewTxIDFromStr: %v", err)
|
||||||
@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
|
|||||||
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
|
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
|
||||||
}
|
}
|
||||||
|
|
||||||
hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
|
hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
|
||||||
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
|
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("NewTxIDFromStr: %v", err)
|
t.Errorf("NewTxIDFromStr: %v", err)
|
||||||
|
47
app/appmessage/rpc_fee_estimate.go
Normal file
47
app/appmessage/rpc_fee_estimate.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetFeeEstimateRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetFeeEstimateRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetFeeEstimateRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetFeeEstimateRequestMessage returns a instance of the message
|
||||||
|
func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
|
||||||
|
return &GetFeeEstimateRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type RPCFeeRateBucket struct {
|
||||||
|
Feerate float64
|
||||||
|
EstimatedSeconds float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type RPCFeeEstimate struct {
|
||||||
|
PriorityBucket RPCFeeRateBucket
|
||||||
|
NormalBuckets []RPCFeeRateBucket
|
||||||
|
LowBuckets []RPCFeeRateBucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCoinSupplyResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetFeeEstimateResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Estimate RPCFeeEstimate
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetFeeEstimateResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetFeeEstimateResponseMessage returns a instance of the message
|
||||||
|
func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
|
||||||
|
return &GetFeeEstimateResponseMessage{}
|
||||||
|
}
|
47
app/appmessage/rpc_get_balances_by_addresses.go
Normal file
47
app/appmessage/rpc_get_balances_by_addresses.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalancesByAddressesRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Addresses []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalancesByAddressesRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalancesByAddressesRequest returns a instance of the message
|
||||||
|
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
|
||||||
|
return &GetBalancesByAddressesRequestMessage{
|
||||||
|
Addresses: addresses,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BalancesByAddressesEntry represents the balance of some address
|
||||||
|
type BalancesByAddressesEntry struct {
|
||||||
|
Address string
|
||||||
|
Balance uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalancesByAddressesResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Entries []*BalancesByAddressesEntry
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalancesByAddressesResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalancesByAddressesResponse returns an instance of the message
|
||||||
|
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
|
||||||
|
return &GetBalancesByAddressesResponseMessage{
|
||||||
|
Entries: entries,
|
||||||
|
}
|
||||||
|
}
|
@ -5,6 +5,7 @@ package appmessage
|
|||||||
type GetBlockTemplateRequestMessage struct {
|
type GetBlockTemplateRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
PayAddress string
|
PayAddress string
|
||||||
|
ExtraData string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetBlockTemplateRequestMessage returns a instance of the message
|
// NewGetBlockTemplateRequestMessage returns a instance of the message
|
||||||
func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
|
func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
|
||||||
return &GetBlockTemplateRequestMessage{
|
return &GetBlockTemplateRequestMessage{
|
||||||
PayAddress: payAddress,
|
PayAddress: payAddress,
|
||||||
|
ExtraData: extraData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
40
app/appmessage/rpc_get_coin_supply.go
Normal file
40
app/appmessage/rpc_get_coin_supply.go
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetCoinSupplyRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetCoinSupplyRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetCoinSupplyRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetCoinSupplyRequestMessage returns a instance of the message
|
||||||
|
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
|
||||||
|
return &GetCoinSupplyRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCoinSupplyResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetCoinSupplyResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
MaxSompi uint64
|
||||||
|
CirculatingSompi uint64
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetCoinSupplyResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetCoinSupplyResponseMessage returns a instance of the message
|
||||||
|
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
|
||||||
|
return &GetCoinSupplyResponseMessage{
|
||||||
|
MaxSompi: maxSompi,
|
||||||
|
CirculatingSompi: circulatingSompi,
|
||||||
|
}
|
||||||
|
}
|
@ -23,6 +23,8 @@ type GetInfoResponseMessage struct {
|
|||||||
P2PID string
|
P2PID string
|
||||||
MempoolSize uint64
|
MempoolSize uint64
|
||||||
ServerVersion string
|
ServerVersion string
|
||||||
|
IsUtxoIndexed bool
|
||||||
|
IsSynced bool
|
||||||
|
|
||||||
Error *RPCError
|
Error *RPCError
|
||||||
}
|
}
|
||||||
@ -33,10 +35,12 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetInfoResponseMessage returns a instance of the message
|
// NewGetInfoResponseMessage returns a instance of the message
|
||||||
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
|
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
|
||||||
return &GetInfoResponseMessage{
|
return &GetInfoResponseMessage{
|
||||||
P2PID: p2pID,
|
P2PID: p2pID,
|
||||||
MempoolSize: mempoolSize,
|
MempoolSize: mempoolSize,
|
||||||
ServerVersion: serverVersion,
|
ServerVersion: serverVersion,
|
||||||
|
IsUtxoIndexed: isUtxoIndexed,
|
||||||
|
IsSynced: isSynced,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,6 +4,8 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetMempoolEntriesRequestMessage struct {
|
type GetMempoolEntriesRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntriesRequestMessage returns a instance of the message
|
// NewGetMempoolEntriesRequestMessage returns a instance of the message
|
||||||
func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
|
func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
|
||||||
return &GetMempoolEntriesRequestMessage{}
|
return &GetMempoolEntriesRequestMessage{
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
|
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
|
||||||
|
52
app/appmessage/rpc_get_mempool_entries_by_addresses.go
Normal file
52
app/appmessage/rpc_get_mempool_entries_by_addresses.go
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MempoolEntryByAddress represents MempoolEntries associated with some address
|
||||||
|
type MempoolEntryByAddress struct {
|
||||||
|
Address string
|
||||||
|
Receiving []*MempoolEntry
|
||||||
|
Sending []*MempoolEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetMempoolEntriesByAddressesRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Addresses []string
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetMempoolEntriesByAddressesRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
|
||||||
|
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
|
||||||
|
return &GetMempoolEntriesByAddressesRequestMessage{
|
||||||
|
Addresses: addresses,
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetMempoolEntriesByAddressesResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Entries []*MempoolEntryByAddress
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetMempoolEntriesByAddressesResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
|
||||||
|
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
|
||||||
|
return &GetMempoolEntriesByAddressesResponseMessage{
|
||||||
|
Entries: entries,
|
||||||
|
}
|
||||||
|
}
|
@ -5,6 +5,8 @@ package appmessage
|
|||||||
type GetMempoolEntryRequestMessage struct {
|
type GetMempoolEntryRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
TxID string
|
TxID string
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntryRequestMessage returns a instance of the message
|
// NewGetMempoolEntryRequestMessage returns a instance of the message
|
||||||
func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
|
func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
|
||||||
return &GetMempoolEntryRequestMessage{TxID: txID}
|
return &GetMempoolEntryRequestMessage{
|
||||||
|
TxID: txID,
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMempoolEntryResponseMessage is an appmessage corresponding to
|
// GetMempoolEntryResponseMessage is an appmessage corresponding to
|
||||||
@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
|
|||||||
type MempoolEntry struct {
|
type MempoolEntry struct {
|
||||||
Fee uint64
|
Fee uint64
|
||||||
Transaction *RPCTransaction
|
Transaction *RPCTransaction
|
||||||
|
IsOrphan bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
||||||
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
|
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
|
||||||
return &GetMempoolEntryResponseMessage{
|
return &GetMempoolEntryResponseMessage{
|
||||||
Entry: &MempoolEntry{
|
Entry: &MempoolEntry{
|
||||||
Fee: fee,
|
Fee: fee,
|
||||||
Transaction: transaction,
|
Transaction: transaction,
|
||||||
|
IsOrphan: isOrphan,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ package appmessage
|
|||||||
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
|
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
StartHash string
|
StartHash string
|
||||||
|
IncludeAcceptedTransactionIDs bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,18 +14,29 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
|
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
|
||||||
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
|
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
|
||||||
|
startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
|
||||||
|
|
||||||
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
|
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
|
||||||
StartHash: startHash,
|
StartHash: startHash,
|
||||||
|
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
|
||||||
|
// VirtualSelectedParentChainChangedNotificationMessage appmessages
|
||||||
|
type AcceptedTransactionIDs struct {
|
||||||
|
AcceptingBlockHash string
|
||||||
|
AcceptedTransactionIDs []string
|
||||||
|
}
|
||||||
|
|
||||||
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
|
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
|
||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
|
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
RemovedChainBlockHashes []string
|
RemovedChainBlockHashes []string
|
||||||
AddedChainBlockHashes []string
|
AddedChainBlockHashes []string
|
||||||
|
AcceptedTransactionIDs []*AcceptedTransactionIDs
|
||||||
|
|
||||||
Error *RPCError
|
Error *RPCError
|
||||||
}
|
}
|
||||||
@ -36,10 +48,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
|
|||||||
|
|
||||||
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
|
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
|
||||||
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
|
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
|
||||||
addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
|
addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
|
||||||
|
|
||||||
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
|
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
|
||||||
RemovedChainBlockHashes: removedChainBlockHashes,
|
RemovedChainBlockHashes: removedChainBlockHashes,
|
||||||
AddedChainBlockHashes: addedChainBlockHashes,
|
AddedChainBlockHashes: addedChainBlockHashes,
|
||||||
|
AcceptedTransactionIDs: acceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
50
app/appmessage/rpc_notify_new_block_template.go
Normal file
50
app/appmessage/rpc_notify_new_block_template.go
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NotifyNewBlockTemplateRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdNotifyNewBlockTemplateRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
|
||||||
|
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
|
||||||
|
return &NotifyNewBlockTemplateRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NotifyNewBlockTemplateResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdNotifyNewBlockTemplateResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
|
||||||
|
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
|
||||||
|
return &NotifyNewBlockTemplateResponseMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlockTemplateNotificationMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NewBlockTemplateNotificationMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
|
||||||
|
return CmdNewBlockTemplateNotificationMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNewBlockTemplateNotificationMessage returns an instance of the message
|
||||||
|
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
|
||||||
|
return &NewBlockTemplateNotificationMessage{}
|
||||||
|
}
|
@ -4,6 +4,7 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
|
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
|
IncludeAcceptedTransactionIDs bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -11,9 +12,13 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
|
|||||||
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
|
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
|
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
|
||||||
func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
|
func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
|
||||||
return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
|
includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
|
||||||
|
|
||||||
|
return &NotifyVirtualSelectedParentChainChangedRequestMessage{
|
||||||
|
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
|
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
|
||||||
@ -39,6 +44,7 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
|
|||||||
baseMessage
|
baseMessage
|
||||||
RemovedChainBlockHashes []string
|
RemovedChainBlockHashes []string
|
||||||
AddedChainBlockHashes []string
|
AddedChainBlockHashes []string
|
||||||
|
AcceptedTransactionIDs []*AcceptedTransactionIDs
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -48,10 +54,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
|
|||||||
|
|
||||||
// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
|
// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
|
||||||
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
|
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
|
||||||
addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
|
addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
|
||||||
|
|
||||||
return &VirtualSelectedParentChainChangedNotificationMessage{
|
return &VirtualSelectedParentChainChangedNotificationMessage{
|
||||||
RemovedChainBlockHashes: removedChainBlockHashes,
|
RemovedChainBlockHashes: removedChainBlockHashes,
|
||||||
AddedChainBlockHashes: addedChainBlocks,
|
AddedChainBlockHashes: addedChainBlocks,
|
||||||
|
AcceptedTransactionIDs: acceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -99,4 +99,7 @@ type RPCBlockVerboseData struct {
|
|||||||
IsHeaderOnly bool
|
IsHeaderOnly bool
|
||||||
BlueScore uint64
|
BlueScore uint64
|
||||||
ChildrenHashes []string
|
ChildrenHashes []string
|
||||||
|
MergeSetBluesHashes []string
|
||||||
|
MergeSetRedsHashes []string
|
||||||
|
IsChainBlock bool
|
||||||
}
|
}
|
||||||
|
@ -52,6 +52,7 @@ type RPCTransaction struct {
|
|||||||
SubnetworkID string
|
SubnetworkID string
|
||||||
Gas uint64
|
Gas uint64
|
||||||
Payload string
|
Payload string
|
||||||
|
Mass uint64
|
||||||
VerboseData *RPCTransactionVerboseData
|
VerboseData *RPCTransactionVerboseData
|
||||||
}
|
}
|
||||||
|
|
||||||
|
42
app/appmessage/rpc_submit_transaction_replacement.go
Normal file
42
app/appmessage/rpc_submit_transaction_replacement.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type SubmitTransactionReplacementRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Transaction *RPCTransaction
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdSubmitTransactionReplacementRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubmitTransactionReplacementRequestMessage returns a instance of the message
|
||||||
|
func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
|
||||||
|
return &SubmitTransactionReplacementRequestMessage{
|
||||||
|
Transaction: transaction,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type SubmitTransactionReplacementResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
TransactionID string
|
||||||
|
ReplacedTransaction *RPCTransaction
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdSubmitTransactionReplacementResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubmitTransactionReplacementResponseMessage returns a instance of the message
|
||||||
|
func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
|
||||||
|
return &SubmitTransactionReplacementResponseMessage{
|
||||||
|
TransactionID: transactionID,
|
||||||
|
}
|
||||||
|
}
|
@ -4,6 +4,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol"
|
"github.com/kaspanet/kaspad/app/protocol"
|
||||||
@ -67,6 +69,7 @@ func (a *ComponentManager) Stop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
a.protocolManager.Close()
|
a.protocolManager.Close()
|
||||||
|
close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -118,7 +121,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
|
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
|
||||||
|
|
||||||
return &ComponentManager{
|
return &ComponentManager{
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
@ -139,6 +142,7 @@ func setupRPC(
|
|||||||
connectionManager *connmanager.ConnectionManager,
|
connectionManager *connmanager.ConnectionManager,
|
||||||
addressManager *addressmanager.AddressManager,
|
addressManager *addressmanager.AddressManager,
|
||||||
utxoIndex *utxoindex.UTXOIndex,
|
utxoIndex *utxoindex.UTXOIndex,
|
||||||
|
consensusEventsChan chan externalapi.ConsensusEvent,
|
||||||
shutDownChan chan<- struct{},
|
shutDownChan chan<- struct{},
|
||||||
) *rpc.Manager {
|
) *rpc.Manager {
|
||||||
|
|
||||||
@ -150,10 +154,10 @@ func setupRPC(
|
|||||||
connectionManager,
|
connectionManager,
|
||||||
addressManager,
|
addressManager,
|
||||||
utxoIndex,
|
utxoIndex,
|
||||||
|
consensusEventsChan,
|
||||||
shutDownChan,
|
shutDownChan,
|
||||||
)
|
)
|
||||||
protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
|
protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
|
||||||
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
|
|
||||||
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
|
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
|
||||||
|
|
||||||
return rpcManager
|
return rpcManager
|
||||||
|
@ -16,53 +16,42 @@ import (
|
|||||||
// OnNewBlock updates the mempool after a new block arrival, and
|
// OnNewBlock updates the mempool after a new block arrival, and
|
||||||
// relays newly unorphaned transactions and possibly rebroadcast
|
// relays newly unorphaned transactions and possibly rebroadcast
|
||||||
// manually added transactions when not in IBD.
|
// manually added transactions when not in IBD.
|
||||||
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
|
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
|
||||||
virtualChangeSet *externalapi.VirtualChangeSet) error {
|
|
||||||
|
|
||||||
hash := consensushashing.BlockHash(block)
|
hash := consensushashing.BlockHash(block)
|
||||||
log.Debugf("OnNewBlock start for block %s", hash)
|
log.Tracef("OnNewBlock start for block %s", hash)
|
||||||
defer log.Debugf("OnNewBlock end for block %s", hash)
|
defer log.Tracef("OnNewBlock end for block %s", hash)
|
||||||
|
|
||||||
unorphaningResults, err := f.UnorphanBlocks(block)
|
unorphanedBlocks, err := f.UnorphanBlocks(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
|
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
|
||||||
|
|
||||||
newBlocks := []*externalapi.DomainBlock{block}
|
newBlocks := []*externalapi.DomainBlock{block}
|
||||||
newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
|
newBlocks = append(newBlocks, unorphanedBlocks...)
|
||||||
for _, unorphaningResult := range unorphaningResults {
|
|
||||||
newBlocks = append(newBlocks, unorphaningResult.block)
|
|
||||||
newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
|
|
||||||
}
|
|
||||||
|
|
||||||
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
|
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
|
||||||
for i, newBlock := range newBlocks {
|
for _, newBlock := range newBlocks {
|
||||||
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
|
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
|
||||||
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
|
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
|
||||||
|
|
||||||
if f.onBlockAddedToDAGHandler != nil {
|
|
||||||
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
|
|
||||||
virtualChangeSet = newVirtualChangeSets[i]
|
|
||||||
err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
|
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnVirtualChange calls the handler function whenever the virtual block changes.
|
// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
|
||||||
func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
func (f *FlowContext) OnNewBlockTemplate() error {
|
||||||
if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
|
// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
|
||||||
return f.onVirtualChangeHandler(virtualChangeSet)
|
// state consistent without dependency on external event registration
|
||||||
|
f.Domain().MiningManager().ClearBlockTemplate()
|
||||||
|
if f.onNewBlockTemplateHandler != nil {
|
||||||
|
return f.onNewBlockTemplateHandler()
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -118,14 +107,18 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
|
|||||||
return protocolerrors.Errorf(false, "cannot add header only block")
|
return protocolerrors.Errorf(false, "cannot add header only block")
|
||||||
}
|
}
|
||||||
|
|
||||||
virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
|
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = f.OnNewBlock(block, virtualChangeSet)
|
err = f.OnNewBlockTemplate()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = f.OnNewBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -150,7 +143,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
f.ibdPeer = ibdPeer
|
f.ibdPeer = ibdPeer
|
||||||
log.Infof("IBD started")
|
log.Infof("IBD started with peer %s", ibdPeer)
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,7 @@ package flowcontext
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
@ -9,6 +10,11 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrPingTimeout signifies that a ping operation timed out.
|
||||||
|
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
|
||||||
|
)
|
||||||
|
|
||||||
// HandleError handles an error from a flow,
|
// HandleError handles an error from a flow,
|
||||||
// It sends the error to errChan if isStopping == 0 and increments isStopping
|
// It sends the error to errChan if isStopping == 0 and increments isStopping
|
||||||
//
|
//
|
||||||
@ -21,8 +27,15 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
|
|||||||
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
if errors.Is(err, ErrPingTimeout) {
|
||||||
|
// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
|
||||||
log.Errorf("error from %s: %s", flowName, err)
|
log.Errorf("error from %s: %s", flowName, err)
|
||||||
|
} else {
|
||||||
|
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
|
||||||
|
logFrame := strings.Repeat("=", 52)
|
||||||
|
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
|
||||||
|
flowName, logFrame, err, logFrame)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||||
|
@ -18,12 +18,8 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OnBlockAddedToDAGHandler is a handler function that's triggered
|
// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
|
||||||
// when a block is added to the DAG
|
type OnNewBlockTemplateHandler func() error
|
||||||
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
|
||||||
|
|
||||||
// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
|
|
||||||
type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
|
|
||||||
|
|
||||||
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
|
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
|
||||||
// resets due to pruning point change via IBD.
|
// resets due to pruning point change via IBD.
|
||||||
@ -44,8 +40,7 @@ type FlowContext struct {
|
|||||||
|
|
||||||
timeStarted int64
|
timeStarted int64
|
||||||
|
|
||||||
onVirtualChangeHandler OnVirtualChangeHandler
|
onNewBlockTemplateHandler OnNewBlockTemplateHandler
|
||||||
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
|
|
||||||
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
|
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
|
||||||
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
||||||
|
|
||||||
@ -102,14 +97,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
|
|||||||
return f.shutdownChan
|
return f.shutdownChan
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
|
// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
|
||||||
func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
|
func (f *FlowContext) IsNearlySynced() (bool, error) {
|
||||||
f.onVirtualChangeHandler = onVirtualChangeHandler
|
return f.Domain().Consensus().IsNearlySynced()
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
|
||||||
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
|
func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
|
||||||
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
|
f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
|
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
|
||||||
|
@ -72,3 +72,10 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
|
|||||||
}
|
}
|
||||||
return peers
|
return peers
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasPeers returns whether there are currently active peers
|
||||||
|
func (f *FlowContext) HasPeers() bool {
|
||||||
|
f.peersMutex.RLock()
|
||||||
|
defer f.peersMutex.RUnlock()
|
||||||
|
return len(f.peers) > 0
|
||||||
|
}
|
||||||
|
@ -15,12 +15,6 @@ import (
|
|||||||
// on: 2^orphanResolutionRange * PHANTOM K.
|
// on: 2^orphanResolutionRange * PHANTOM K.
|
||||||
const maxOrphans = 600
|
const maxOrphans = 600
|
||||||
|
|
||||||
// UnorphaningResult is the result of unorphaning a block
|
|
||||||
type UnorphaningResult struct {
|
|
||||||
block *externalapi.DomainBlock
|
|
||||||
virtualChangeSet *externalapi.VirtualChangeSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddOrphan adds the block to the orphan set
|
// AddOrphan adds the block to the orphan set
|
||||||
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
||||||
f.orphansMutex.Lock()
|
f.orphansMutex.Lock()
|
||||||
@ -57,7 +51,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
|
// UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
|
||||||
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
|
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
|
||||||
f.orphansMutex.Lock()
|
f.orphansMutex.Lock()
|
||||||
defer f.orphansMutex.Unlock()
|
defer f.orphansMutex.Unlock()
|
||||||
|
|
||||||
@ -66,7 +60,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
|
|||||||
rootBlockHash := consensushashing.BlockHash(rootBlock)
|
rootBlockHash := consensushashing.BlockHash(rootBlock)
|
||||||
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
|
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
|
||||||
|
|
||||||
var unorphaningResults []*UnorphaningResult
|
var unorphanedBlocks []*externalapi.DomainBlock
|
||||||
for len(processQueue) > 0 {
|
for len(processQueue) > 0 {
|
||||||
var orphanHash externalapi.DomainHash
|
var orphanHash externalapi.DomainHash
|
||||||
orphanHash, processQueue = processQueue[0], processQueue[1:]
|
orphanHash, processQueue = processQueue[0], processQueue[1:]
|
||||||
@ -90,21 +84,18 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if canBeUnorphaned {
|
if canBeUnorphaned {
|
||||||
virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
|
unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if unorphaningSucceeded {
|
if unorphaningSucceeded {
|
||||||
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
|
unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
|
||||||
block: orphanBlock,
|
|
||||||
virtualChangeSet: virtualChangeSet,
|
|
||||||
})
|
|
||||||
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
|
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return unorphaningResults, nil
|
return unorphanedBlocks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
|
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
|
||||||
@ -143,24 +134,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
|
|||||||
return childOrphans
|
return childOrphans
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
|
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
|
||||||
orphanBlock, ok := f.orphans[orphanHash]
|
orphanBlock, ok := f.orphans[orphanHash]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
|
return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
|
||||||
}
|
}
|
||||||
delete(f.orphans, orphanHash)
|
delete(f.orphans, orphanHash)
|
||||||
|
|
||||||
virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
|
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
|
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
|
||||||
return nil, false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return nil, false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Unorphaned block %s", orphanHash)
|
log.Infof("Unorphaned block %s", orphanHash)
|
||||||
return virtualChangeSet, true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
|
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
|
||||||
|
@ -1,47 +0,0 @@
|
|||||||
package flowcontext
|
|
||||||
|
|
||||||
import "github.com/kaspanet/kaspad/util/mstime"
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
|
|
||||||
)
|
|
||||||
|
|
||||||
// ShouldMine returns whether it's ok to use block template from this node
|
|
||||||
// for mining purposes.
|
|
||||||
func (f *FlowContext) ShouldMine() (bool, error) {
|
|
||||||
peers := f.Peers()
|
|
||||||
if len(peers) == 0 {
|
|
||||||
log.Debugf("The node is not connected, so ShouldMine returns false")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.IsIBDRunning() {
|
|
||||||
log.Debugf("IBD is running, so ShouldMine returns false")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
now := mstime.Now().UnixMilliseconds()
|
|
||||||
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
|
|
||||||
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
|
|
||||||
virtualSelectedParentHeader.TimeInMilliseconds())
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
|
|
||||||
virtualSelectedParentHeader.TimeInMilliseconds())
|
|
||||||
return false, nil
|
|
||||||
}
|
|
@ -18,9 +18,9 @@ var (
|
|||||||
|
|
||||||
// minAcceptableProtocolVersion is the lowest protocol version that a
|
// minAcceptableProtocolVersion is the lowest protocol version that a
|
||||||
// connected peer may support.
|
// connected peer may support.
|
||||||
minAcceptableProtocolVersion = uint32(3)
|
minAcceptableProtocolVersion = uint32(5)
|
||||||
|
|
||||||
maxAcceptableProtocolVersion = uint32(4)
|
maxAcceptableProtocolVersion = uint32(5)
|
||||||
)
|
)
|
||||||
|
|
||||||
type receiveVersionFlow struct {
|
type receiveVersionFlow struct {
|
||||||
|
@ -1,86 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
|
|
||||||
type HandleIBDBlockLocatorContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
|
|
||||||
// the highest known block that's in the selected parent chain of `targetHash` to the
|
|
||||||
// requesting peer.
|
|
||||||
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
|
||||||
|
|
||||||
for {
|
|
||||||
message, err := incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)
|
|
||||||
|
|
||||||
targetHash := ibdBlockLocatorMessage.TargetHash
|
|
||||||
log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)
|
|
||||||
|
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists {
|
|
||||||
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
|
||||||
"with an unknown targetHash %s", targetHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
foundHighestHashInTheSelectedParentChainOfTargetHash := false
|
|
||||||
for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
|
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// The IBD block locator is checking only existing blocks with bodies.
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
|
|
||||||
context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !isBlockLocatorHashInSelectedParentChainOfHighHash {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
foundHighestHashInTheSelectedParentChainOfTargetHash = true
|
|
||||||
log.Debugf("Found a known hash %s amongst peer %s's "+
|
|
||||||
"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)
|
|
||||||
|
|
||||||
ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
|
|
||||||
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if !foundHighestHashInTheSelectedParentChainOfTargetHash {
|
|
||||||
log.Warnf("no hash was found in the blockLocator "+
|
|
||||||
"that was in the selected parent chain of targetHash %s", targetHash)
|
|
||||||
|
|
||||||
ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
|
|
||||||
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,54 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
|
|
||||||
type HandleIBDBlockRequestsContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
|
|
||||||
// their corresponding blocks to the requesting peer.
|
|
||||||
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route) error {
|
|
||||||
|
|
||||||
for {
|
|
||||||
message, err := incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
|
|
||||||
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
|
||||||
for i, hash := range msgRequestIBDBlocks.Hashes {
|
|
||||||
// Fetch the block from the database.
|
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
|
||||||
|
|
||||||
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
|
||||||
ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
|
|
||||||
err = outgoingRoute.Enqueue(ibdBlockMessage)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,95 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"runtime"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
|
|
||||||
type PruningPointAndItsAnticoneRequestsContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
var isBusy uint32
|
|
||||||
|
|
||||||
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
|
|
||||||
// the pruning point and its anticone to the requesting peer.
|
|
||||||
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
|
||||||
|
|
||||||
for {
|
|
||||||
err := func() error {
|
|
||||||
_, err := incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
|
|
||||||
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
|
|
||||||
}
|
|
||||||
defer atomic.StoreUint32(&isBusy, 0)
|
|
||||||
|
|
||||||
log.Debugf("Got request for pruning point and its anticone from %s", peer)
|
|
||||||
|
|
||||||
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
|
|
||||||
for i, header := range pruningPointHeaders {
|
|
||||||
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, blockHash := range pointAndItsAnticone {
|
|
||||||
err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Sent pruning point and its anticone to %s", peer)
|
|
||||||
return nil
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
|
|
||||||
blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
runtime.GC()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,53 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
	Domain() domain.Domain
}
|
|
||||||
|
|
||||||
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
|
|
||||||
// their corresponding blocks to the requesting peer.
|
|
||||||
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
|
||||||
|
|
||||||
for {
|
|
||||||
message, err := incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
|
|
||||||
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
|
||||||
for _, hash := range getRelayBlocksMessage.Hashes {
|
|
||||||
// Fetch the block from the database.
|
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Relayed block with hash %s", hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,105 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ibdBatchSize is the number of items (headers, blocks, UTXO-set chunks)
// exchanged per batch during IBD; bounded by the router's message queue size.
const ibdBatchSize = router.DefaultMaxMessages

// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
	Domain() domain.Domain
}

// handleRequestHeadersFlow holds the state shared by the stages of a single
// HandleRequestHeaders invocation.
type handleRequestHeadersFlow struct {
	RequestHeadersContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peer.Peer
}
|
|
||||||
|
|
||||||
// HandleRequestHeaders handles RequestHeaders messages
|
|
||||||
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
|
||||||
|
|
||||||
flow := &handleRequestHeadersFlow{
|
|
||||||
RequestHeadersContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
peer: peer,
|
|
||||||
}
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestHeadersFlow) start() error {
|
|
||||||
for {
|
|
||||||
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
|
||||||
|
|
||||||
for !lowHash.Equal(highHash) {
|
|
||||||
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
|
||||||
|
|
||||||
// GetHashesBetween is a relatively heavy operation so we limit it
|
|
||||||
// in order to avoid locking the consensus for too long
|
|
||||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
|
||||||
const maxBlocks = 1 << 10
|
|
||||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)
|
|
||||||
|
|
||||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
|
||||||
for i, blockHash := range blockHashes {
|
|
||||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
|
|
||||||
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
message, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
|
|
||||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
|
|
||||||
}
|
|
||||||
|
|
||||||
// The next lowHash is the last element in blockHashes
|
|
||||||
lowHash = blockHashes[len(blockHashes)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
|
|
||||||
highHash *externalapi.DomainHash, err error) {
|
|
||||||
|
|
||||||
message, err := incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)
|
|
||||||
|
|
||||||
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
|
|
||||||
}
|
|
@ -1,577 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
	Domain() domain.Domain
	Config() *config.Config
	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
	OnPruningPointUTXOSetOverride() error
	IsIBDRunning() bool
	TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
	UnsetIBDRunning()
	IsRecoverableError(err error) bool
}

// handleIBDFlow holds the state of a single HandleIBD invocation: the shared
// context, the peer being synced from, and the routes used to exchange IBD
// messages with that peer.
type handleIBDFlow struct {
	IBDContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
}
|
|
||||||
|
|
||||||
// HandleIBD handles IBD
|
|
||||||
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
|
||||||
peer *peerpkg.Peer) error {
|
|
||||||
|
|
||||||
flow := &handleIBDFlow{
|
|
||||||
IBDContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
peer: peer,
|
|
||||||
}
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) start() error {
|
|
||||||
for {
|
|
||||||
// Wait for IBD requests triggered by other flows
|
|
||||||
block, ok := <-flow.peer.IBDRequestChannel()
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
err := flow.runIBDIfNotRunning(block)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// runIBDIfNotRunning performs one full IBD session against flow.peer with the
// given block as the sync target. If another IBD session is already in
// progress (TrySetIBDRunning fails), it returns immediately without error.
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
	if !wasIBDNotRunning {
		log.Debugf("IBD is already running")
		return nil
	}

	isFinishedSuccessfully := false
	defer func() {
		// Always release the IBD lock and log the outcome, even on error.
		flow.UnsetIBDRunning()
		flow.logIBDFinished(isFinishedSuccessfully)
	}()

	highHash := consensushashing.BlockHash(block)
	log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
	log.Debugf("Syncing blocks up to %s", highHash)
	log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
	highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
	if err != nil {
		return err
	}
	log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
	if err != nil {
		return err
	}

	if !shouldSync {
		return nil
	}

	if shouldDownloadHeadersProof {
		log.Infof("Starting IBD with headers proof")
		err := flow.ibdWithHeadersProof(highHash)
		if err != nil {
			return err
		}
	} else {
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", highHash)
				return nil
			}
		}

		err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
		if err != nil {
			return err
		}
	}

	err = flow.syncMissingBlockBodies(highHash)
	if err != nil {
		return err
	}

	log.Debugf("Finished syncing blocks up to %s", highHash)
	isFinishedSuccessfully = true
	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
|
||||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
|
||||||
successString := "successfully"
|
|
||||||
if !isFinishedSuccessfully {
|
|
||||||
successString = "(interrupted)"
|
|
||||||
}
|
|
||||||
log.Infof("IBD finished %s", successString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
	targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {

	log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
	blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
	if err != nil {
		return nil, false, err
	}

	// Zoom-in loop: each round asks the peer for the highest locator hash it
	// knows, then builds a denser locator just above that hash, until the
	// exact highest shared block is pinned down.
	for {
		highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
		if err != nil {
			return nil, false, err
		}
		if !highestHashFound {
			return nil, false, nil
		}
		highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
		if err != nil {
			return nil, false, err
		}

		if highestHashIndex == 0 ||
			// If the block locator contains only two adjacent chain blocks, the
			// syncer will always find the same highest chain block, so to avoid
			// an endless loop, we explicitly stop the loop in such situation.
			(len(blockLocator) == 2 && highestHashIndex == 1) {

			return highestHash, true, nil
		}

		locatorHashAboveHighestHash := highestHash
		if highestHashIndex > 0 {
			locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
		}

		blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
		if err != nil {
			return nil, false, err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
|
||||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
|
||||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
|
||||||
"restarting with full block locator")
|
|
||||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return blockLocator, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
|
||||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
|
||||||
|
|
||||||
highestHashIndex := 0
|
|
||||||
highestHashIndexFound := false
|
|
||||||
for i, blockLocatorHash := range blockLocator {
|
|
||||||
if highestHash.Equal(blockLocatorHash) {
|
|
||||||
highestHashIndex = i
|
|
||||||
highestHashIndexFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !highestHashIndexFound {
|
|
||||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
|
||||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
|
||||||
}
|
|
||||||
log.Debugf("The index of the highest hash in the original "+
|
|
||||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
|
||||||
|
|
||||||
return highestHashIndex, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
	targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {

	ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
	err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
	if err != nil {
		return nil, false, err
	}
	// The peer answers with either the highest locator hash it knows or an
	// explicit not-found message; anything else is a protocol violation.
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}
	switch message := message.(type) {
	case *appmessage.MsgIBDBlockLocatorHighestHash:
		highestHash := message.HighestHash
		log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

		return highestHash, true, nil
	case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
		log.Debugf("Peer %s does not know any block within our blockLocator. "+
			"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
		return nil, false, nil
	default:
		return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
	}
}
|
|
||||||
|
|
||||||
// syncPruningPointFutureHeaders downloads all headers in the range
// (highestSharedBlockHash, highHash] from the peer and inserts them into the
// given consensus. Header reception runs in a separate goroutine so that
// validation/insertion overlaps with network download.
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
	highHash *externalapi.DomainHash) error {

	log.Infof("Downloading headers from %s", flow.peer)

	err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
	if err != nil {
		return err
	}

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				// Closing the channel tells the consumer loop below that the
				// peer has sent all requested headers.
				close(blockHeadersMessageChan)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			// Request the next batch immediately, keeping the download
			// pipeline full while the consumer processes this batch.
			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				// If the highHash has not been received, the peer is misbehaving
				highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
				if err != nil {
					return err
				}
				if !highHashBlockInfo.Exists {
					return protocolerrors.Errorf(true, "did not receive "+
						"highHash block %s from peer %s during block download", highHash, flow.peer)
				}
				return nil
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}
		case err := <-errChan:
			return err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
|
||||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
|
||||||
|
|
||||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
|
||||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.BlockHeadersMessage:
|
|
||||||
return message, false, nil
|
|
||||||
case *appmessage.MsgDoneHeaders:
|
|
||||||
return nil, true, nil
|
|
||||||
default:
|
|
||||||
return nil, false,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s, got: %s",
|
|
||||||
appmessage.CmdBlockHeaders,
|
|
||||||
appmessage.CmdDoneHeaders,
|
|
||||||
message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processHeader validates the given header and inserts it (without a body)
// into the given consensus. Already-known headers and duplicates are skipped
// silently; an invalid header yields a banning protocol error.
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	block := &externalapi.DomainBlock{
		Header:       header,
		Transactions: nil,
	}

	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
		return err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
		return nil
	}
	_, err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		// Non-rule errors indicate an internal failure rather than a
		// misbehaving peer, so they are wrapped and propagated as-is.
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
|
|
||||||
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
|
|
||||||
|
|
||||||
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
|
|
||||||
|
|
||||||
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
|
|
||||||
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
|
|
||||||
"tip is smaller than the current selected tip")
|
|
||||||
}
|
|
||||||
|
|
||||||
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
|
|
||||||
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
|
|
||||||
return protocolerrors.Errorf(false, "difference between the timestamps of "+
|
|
||||||
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// receiveAndInsertPruningPointUTXOSet streams the pruning point UTXO set from
// the peer in chunks and appends each chunk to the given consensus. It returns
// true on success, and false without error if the peer reports that
// pruningPointHash is no longer its pruning point.
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			// Every ibdBatchSize chunks, explicitly request the next batch —
			// this is the flow-control handshake with the sending peer.
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}
|
|
||||||
|
|
||||||
// syncMissingBlockBodies requests, in batches of ibdBatchSize, the bodies of
// all blocks in highHash's past whose headers are known but whose bodies are
// missing, validates and inserts each block, and finally resolves the virtual.
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		// Blocks are expected back in exactly the order they were requested.
		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
			if err != nil {
				// A duplicate here is benign: the block arrived via another
				// flow while this batch was in flight.
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block, virtualChangeSet)
			if err != nil {
				return err
			}
		}
	}

	return flow.resolveVirtual()
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
|
||||||
if len(block.Transactions) == 0 {
|
|
||||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
|
||||||
consensushashing.BlockHash(block))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) resolveVirtual() error {
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
if i%10 == 0 {
|
|
||||||
log.Infof("Resolving virtual. This may take some time...")
|
|
||||||
}
|
|
||||||
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.OnVirtualChange(virtualChangeSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isCompletelyResolved {
|
|
||||||
log.Infof("Resolved virtual")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,364 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ibdWithHeadersProof performs IBD through the headers-proof path: it
// downloads headers and the pruning point UTXO set into a freshly initialized
// staging consensus and commits that staging consensus only if the entire
// download succeeded. On a recoverable error the staging consensus is deleted
// so a later IBD attempt can start from a clean slate; non-recoverable errors
// are returned as-is.
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
	err := flow.Domain().InitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.downloadHeadersAndPruningUTXOSet(highHash)
	if err != nil {
		if !flow.IsRecoverableError(err) {
			return err
		}

		// Dispose of the partially-populated staging consensus before
		// propagating the (recoverable) download error.
		deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
		if deleteStagingConsensusErr != nil {
			return deleteStagingConsensusErr
		}

		return err
	}

	// The staging consensus now holds the fully-synced state — promote it to
	// the active consensus.
	err = flow.Domain().CommitStagingConsensus()
	if err != nil {
		return err
	}

	// The pruning point UTXO set was replaced; let listeners react.
	err = flow.OnPruningPointUTXOSetOverride()
	if err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
|
|
||||||
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
|
|
||||||
|
|
||||||
if !highestSharedBlockFound {
|
|
||||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
|
|
||||||
if err != nil {
|
|
||||||
return false, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
|
|
||||||
return true, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
|
|
||||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncAndValidatePruningPointProof requests the pruning point proof from the
// peer, validates it against the current consensus, applies it to the staging
// consensus, and returns the hash of the pruning point the proof commits to
// (the last header on the proof's first level).
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
	log.Infof("Downloading the pruning point proof from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
	if err != nil {
		return nil, err
	}
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}
	pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
	if !ok {
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
	}
	pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
	err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
	if err != nil {
		// A rule error means the peer sent an invalid proof — convert it to
		// a banning protocol error. Other errors are internal and returned as-is.
		if errors.As(err, &ruleerrors.RuleError{}) {
			return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
		}
		return nil, err
	}

	err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
	if err != nil {
		return nil, err
	}

	// The proof's pruning point is the newest header on its first level.
	return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
|
|
||||||
|
|
||||||
// downloadHeadersAndPruningUTXOSet populates the staging consensus from the
// peer: it syncs and validates the pruning point proof, downloads the past
// pruning points and the pruning point anticone, downloads all headers up to
// highHash, and finally fetches the pruning point UTXO set. A nil return with
// an unsynced UTXO set (see the log below) quietly aborts IBD.
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash) error {
	proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
	if err != nil {
		return err
	}

	err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
	if err != nil {
		return err
	}

	// TODO: Remove this condition once there's more proper way to check finality violation
	// in the headers proof.
	if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
		return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
	}

	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash)
	if err != nil {
		return err
	}

	log.Infof("Headers downloaded from peer %s", flow.peer)

	highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
	if err != nil {
		return err
	}

	// The peer must have sent the header of the block that triggered this IBD session.
	if !highHashInfo.Exists {
		return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
	}

	err = flow.validatePruningPointFutureHeaderTimestamps()
	if err != nil {
		return err
	}

	log.Debugf("Syncing the current pruning point UTXO set")
	syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
	if err != nil {
		return err
	}
	// A failed (but error-free) UTXO set sync aborts IBD without banning the peer.
	if !syncedPruningPointUTXOSetSuccessfully {
		log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
		return nil
	}
	log.Debugf("Finished syncing the current pruning point UTXO set")
	return nil
}
|
|
||||||
|
|
||||||
// syncPruningPointsAndPruningPointAnticone requests the past pruning points
// and the pruning point anticone from the peer and inserts them into the
// staging consensus. The protocol requires that the first trusted-data block
// received is the pruning point itself; subsequent blocks are processed until
// a `done` message arrives.
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
	log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
	if err != nil {
		return err
	}

	err = flow.validateAndInsertPruningPoints(proofPruningPoint)
	if err != nil {
		return err
	}

	pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
	if err != nil {
		return err
	}

	// An immediate `done` means the peer never sent the pruning point — ban.
	if done {
		return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
	}

	// The first trusted-data block must be the pruning point committed to by the proof.
	if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
	}

	err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData)
	if err != nil {
		return err
	}

	// Consume the rest of the anticone until the peer signals completion.
	for {
		blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
		if err != nil {
			return err
		}

		if done {
			break
		}

		err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData)
		if err != nil {
			return err
		}
	}

	log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) processBlockWithTrustedData(
|
|
||||||
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedData) error {
|
|
||||||
|
|
||||||
_, err := consensus.ValidateAndInsertBlockWithTrustedData(appmessage.BlockWithTrustedDataToDomainBlockWithTrustedData(block), false)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedData, bool, error) {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch downCastedMessage := message.(type) {
|
|
||||||
case *appmessage.MsgBlockWithTrustedData:
|
|
||||||
return downCastedMessage, false, nil
|
|
||||||
case *appmessage.MsgDoneBlocksWithTrustedData:
|
|
||||||
return nil, true, nil
|
|
||||||
default:
|
|
||||||
return nil, false,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s, got: %s",
|
|
||||||
(&appmessage.MsgBlockWithTrustedData{}).Command(),
|
|
||||||
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
|
|
||||||
downCastedMessage.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
|
|
||||||
if !ok {
|
|
||||||
return nil,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
|
|
||||||
}
|
|
||||||
|
|
||||||
return msgPruningPoints, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateAndInsertPruningPoints receives the peer's list of past pruning
// points, validates it (different from the current pruning point, not
// violating finality, ending in the proof's pruning point) and imports it
// into the staging consensus.
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
	currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
	if err != nil {
		return err
	}

	// Reaching this flow with an unchanged pruning point means the headers
	// proof path was taken needlessly — treat it as peer misbehavior.
	if currentPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
	}

	pruningPoints, err := flow.receivePruningPoints()
	if err != nil {
		return err
	}

	headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
	for i, header := range pruningPoints.Headers {
		headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
	}

	arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
	if err != nil {
		return err
	}

	if arePruningPointsViolatingFinality {
		// TODO: Find a better way to deal with finality conflicts.
		// Note: non-banning (false) — a finality conflict is not proof of malice.
		return protocolerrors.Errorf(false, "pruning points are violating finality")
	}

	// The newest pruning point in the list must match the one the proof commits to.
	lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
	if !lastPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
			"point in the list")
	}

	err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
	if err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
// syncPruningPointUTXOSet verifies that the suggested pruning point is a
// valid pruning point for the staging DAG and then fetches its UTXO set from
// the peer. The returned boolean reports whether the fetch completed
// successfully; (false, nil) means IBD should be stopped without banning.
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
	pruningPoint *externalapi.DomainHash) (bool, error) {

	log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
	isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
	if err != nil {
		return false, err
	}

	// An invalid pruning point is peer misbehavior — ban.
	if !isValid {
		return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
	}

	log.Info("Fetching the pruning point UTXO set")
	isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
	if err != nil {
		return false, err
	}

	if !isSuccessful {
		log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
		return false, nil
	}

	log.Info("Fetched the new pruning point UTXO set")
	return true, nil
}
|
|
||||||
|
|
||||||
// fetchMissingUTXOSet requests the pruning point UTXO set from the peer,
// streams it into the given consensus, and validates the imported pruning
// point in the staging consensus. Imported pruning point data is always
// cleared on exit, whether the fetch succeeded or not. A (false, nil) return
// means the sync should be aborted without banning the peer.
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
	defer func() {
		// Clearing the imported data is mandatory cleanup; a failure here
		// leaves the staging consensus inconsistent, hence the panic.
		err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
		if err != nil {
			panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
		}
	}()

	err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
	if err != nil {
		return false, err
	}

	receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
	if err != nil {
		return false, err
	}
	// Partial receipt is not an error — the caller aborts IBD quietly.
	if !receivedAll {
		return false, nil
	}

	err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
	if err != nil {
		// TODO: Find a better way to deal with finality conflicts.
		if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
			return false, nil
		}
		return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
	}

	return true, nil
}
|
|
@ -1,187 +0,0 @@
|
|||||||
package v3
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/blockrelay"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/ping"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/rejects"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// protocolManager is the subset of the protocol manager's functionality
// needed in order to register the v3 protocol flows.
type protocolManager interface {
	// RegisterFlow registers a long-running flow that handles the given message types.
	RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	// RegisterOneTimeFlow registers a flow that is expected to run once per connection.
	RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
		isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	// RegisterFlowWithCapacity registers a flow whose incoming route is created
	// with the given buffer capacity.
	RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
		messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	// Context returns the flow context shared by all registered flows.
	Context() *flowcontext.FlowContext
}
|
|
||||||
|
|
||||||
// Register is used in order to register all the protocol flows to the given router.
|
|
||||||
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
|
|
||||||
flows = registerAddressFlows(m, router, isStopping, errChan)
|
|
||||||
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
|
|
||||||
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
|
|
||||||
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
|
|
||||||
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
|
|
||||||
|
|
||||||
return flows
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
|
||||||
outgoingRoute := router.OutgoingRoute()
|
|
||||||
|
|
||||||
return []*common.Flow{
|
|
||||||
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// registerBlockRelayFlows registers all block-relay and IBD related flows:
// inv handling, header/block/locator request serving, pruning point data
// serving, and the IBD driver itself.
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		// One-time flow: announces the virtual selected parent inv on connect.
		m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
			isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
			}),

		m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
			appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		// The IBD driver listens to the full IBD message set: headers,
		// pruning point data, trusted-data blocks, UTXO set chunks, and the
		// pruning point proof.
		m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
			appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
			appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedData,
			appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
			appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
			appmessage.CmdPruningPointProof,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBD(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestHeaders", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
				appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandlePruningPointProofRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}
|
|
||||||
|
|
||||||
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
|
||||||
outgoingRoute := router.OutgoingRoute()
|
|
||||||
|
|
||||||
return []*common.Flow{
|
|
||||||
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
|
|
||||||
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
|
||||||
outgoingRoute := router.OutgoingRoute()
|
|
||||||
|
|
||||||
return []*common.Flow{
|
|
||||||
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
|
|
||||||
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
m.RegisterFlow("HandleRequestTransactions", router,
|
|
||||||
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
|
|
||||||
outgoingRoute := router.OutgoingRoute()
|
|
||||||
|
|
||||||
return []*common.Flow{
|
|
||||||
m.RegisterFlow("HandleRejects", router,
|
|
||||||
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
|
|
||||||
},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,33 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
|
|
||||||
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
|
|
||||||
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
|
|
||||||
for {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgInvRelayBlock:
|
|
||||||
flow.invsQueue = append(flow.invsQueue, message)
|
|
||||||
case *appmessage.MsgBlockLocator:
|
|
||||||
return message.BlockLocatorHashes, nil
|
|
||||||
default:
|
|
||||||
return nil,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,40 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
	// Domain provides access to the node's consensus domain.
	Domain() domain.Domain
}
|
|
||||||
|
|
||||||
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer. It loops forever, returning
// only when a route operation fails or proof building errors out.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		// The message content is irrelevant — any message on this route is a
		// request for the proof.
		_, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}

		log.Debugf("Got request for pruning point proof from %s", peer)

		// The proof is rebuilt for every incoming request.
		pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
		if err != nil {
			return err
		}
		pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
		err = outgoingRoute.Enqueue(pruningPointProofMessage)
		if err != nil {
			return err
		}

		log.Debugf("Sent pruning point proof to %s", peer)
	}
}
|
|
@ -1,382 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// orphanResolutionRange is the maximum amount of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details.
var orphanResolutionRange uint32 = 5
|
|
||||||
|
|
||||||
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
	Domain() domain.Domain
	Config() *config.Config
	// OnNewBlock is invoked after a block was validated and inserted into the DAG.
	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
	// OnVirtualChange is invoked when the virtual block changes.
	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
	OnPruningPointUTXOSetOverride() error
	SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
	// Broadcast enqueues the given message for propagation to peers.
	Broadcast(message appmessage.Message) error
	AddOrphan(orphanBlock *externalapi.DomainBlock)
	GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
	// IsOrphan reports whether blockHash is currently in the orphan pool.
	IsOrphan(blockHash *externalapi.DomainHash) bool
	// IsIBDRunning reports whether an IBD session is currently in progress.
	IsIBDRunning() bool
	// IsRecoverableError reports whether the flow may continue after err.
	IsRecoverableError(err error) bool
}
|
|
||||||
|
|
||||||
// handleRelayInvsFlow holds the state of a single HandleRelayInvs session:
// the routes used to talk to the peer and a queue of inv messages that were
// received while the flow was waiting for a different message type
// (see receiveBlockLocator).
type handleRelayInvsFlow struct {
	RelayInvsContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
	invsQueue                    []*appmessage.MsgInvRelayBlock
}
|
|
||||||
|
|
||||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
	peer *peerpkg.Peer) error {

	flow := &handleRelayInvsFlow{
		RelayInvsContext: context,
		incomingRoute:    incomingRoute,
		outgoingRoute:    outgoingRoute,
		peer:             peer,
		invsQueue:        make([]*appmessage.MsgInvRelayBlock, 0),
	}
	err := flow.start()
	// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
	// (the channel is closed regardless of whether start returned an error).
	close(peer.IBDRequestChannel())
	return err
}
|
|
||||||
|
|
||||||
// start runs the main relay loop: it reads inv messages, requests unknown
// blocks, inserts them into the DAG and relays accepted blocks onward.
// It returns only on error or when the incoming route is closed.
func (flow *handleRelayInvsFlow) start() error {
	for {
		log.Debugf("Waiting for inv")
		inv, err := flow.readInv()
		if err != nil {
			return err
		}

		log.Debugf("Got relay inv for block %s", inv.Hash)

		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
		if err != nil {
			return err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			// A peer advertising a block we already know to be invalid is ban-worthy.
			if blockInfo.BlockStatus == externalapi.StatusInvalid {
				return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
					inv.Hash)
			}
			log.Debugf("Block %s already exists. continuing...", inv.Hash)
			continue
		}

		isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
		if err != nil {
			return err
		}

		if flow.IsOrphan(inv.Hash) {
			// A fresh node (virtual selected parent == genesis) must IBD first;
			// orphan resolution on top of bare genesis is disallowed on such nets.
			if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
				log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", inv.Hash)
				continue
			}

			log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
			err := flow.AddOrphanRootsToQueue(inv.Hash)
			if err != nil {
				return err
			}
			continue
		}

		// Block relay is disabled during IBD
		if flow.IsIBDRunning() {
			log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
			continue
		}

		log.Debugf("Requesting block %s", inv.Hash)
		block, exists, err := flow.requestBlock(inv.Hash)
		if err != nil {
			return err
		}
		if exists {
			log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
			continue
		}

		// A bodiless block in response to a block request is ban-worthy.
		err = flow.banIfBlockIsHeaderOnly(block)
		if err != nil {
			return err
		}

		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
			log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
			continue
		}

		log.Debugf("Processing block %s", inv.Hash)
		missingParents, virtualChangeSet, err := flow.processBlock(block)
		if err != nil {
			// Pruned/duplicate blocks are benign races, not protocol violations.
			if errors.Is(err, ruleerrors.ErrPrunedBlock) {
				log.Infof("Ignoring pruned block %s", inv.Hash)
				continue
			}

			if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
				log.Infof("Ignoring duplicate block %s", inv.Hash)
				continue
			}
			return err
		}
		if len(missingParents) > 0 {
			log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
			err := flow.processOrphan(block)
			if err != nil {
				return err
			}
			continue
		}

		log.Debugf("Relaying block %s", inv.Hash)
		err = flow.relayBlock(block)
		if err != nil {
			return err
		}
		log.Infof("Accepted block %s via relay", inv.Hash)
		err = flow.OnNewBlock(block, virtualChangeSet)
		if err != nil {
			return err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
|
||||||
if len(block.Transactions) == 0 {
|
|
||||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
|
||||||
consensushashing.BlockHash(block))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
|
||||||
if len(flow.invsQueue) > 0 {
|
|
||||||
var inv *appmessage.MsgInvRelayBlock
|
|
||||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
|
||||||
return inv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
msg, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
|
||||||
if !ok {
|
|
||||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
|
||||||
"expecting an inv message", msg.Command())
|
|
||||||
}
|
|
||||||
return inv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestBlock requests the block with the given hash from the peer and waits
// for its arrival. It returns (nil, true, nil) when the block is already being
// requested elsewhere, and a ban-worthy protocol error when the peer responds
// with a block whose hash differs from the requested one.
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
	// Register the request in the shared set; if it's already there another
	// flow is fetching it and we can skip.
	exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
	if exists {
		return nil, true, nil
	}

	// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
	// clean from any pending blocks.
	defer flow.SharedRequestedBlocks().Remove(requestHash)

	getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
	err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
	if err != nil {
		return nil, false, err
	}

	msgBlock, err := flow.readMsgBlock()
	if err != nil {
		return nil, false, err
	}

	// Verify the peer sent the block we actually asked for.
	block := appmessage.MsgBlockToDomainBlock(msgBlock)
	blockHash := consensushashing.BlockHash(block)
	if !blockHash.Equal(requestHash) {
		return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
	}

	return block, false, nil
}
|
|
||||||
|
|
||||||
// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
|
|
||||||
//
|
|
||||||
// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
|
|
||||||
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
|
|
||||||
for {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgInvRelayBlock:
|
|
||||||
flow.invsQueue = append(flow.invsQueue, message)
|
|
||||||
case *appmessage.MsgBlock:
|
|
||||||
return message, nil
|
|
||||||
default:
|
|
||||||
return nil, errors.Errorf("unexpected message %s", message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processBlock validates the block and inserts it into the DAG. On success it
// returns the resulting virtual change set. If the block is an orphan, it
// returns the hashes of its missing parents with a nil error. Rule violations
// other than missing parents are converted into ban-worthy protocol errors;
// non-rule errors are wrapped and propagated as-is.
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
	blockHash := consensushashing.BlockHash(block)
	virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
	if err != nil {
		// Non-rule errors indicate an internal failure, not peer misbehavior.
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
		}

		// Missing parents means the block is an orphan - not an error here.
		missingParentsError := &ruleerrors.ErrMissingParents{}
		if errors.As(err, missingParentsError) {
			return missingParentsError.MissingParentHashes, nil, nil
		}
		log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
		return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
	}
	return nil, virtualChangeSet, nil
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
|
||||||
blockHash := consensushashing.BlockHash(block)
|
|
||||||
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
|
||||||
}
|
|
||||||
|
|
||||||
// processOrphan decides what to do with a block whose parents are missing:
// if it is close enough to our DAG (within orphanResolutionRange) it is added
// to the orphan pool and its missing ancestors are requested; otherwise IBD
// is triggered against it via the peer's IBD request channel.
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
	blockHash := consensushashing.BlockHash(block)

	// Return if the block has been orphaned from elsewhere already
	if flow.IsOrphan(blockHash) {
		log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
		return nil
	}

	// Add the block to the orphan set if it's within orphan resolution range
	isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
	if err != nil {
		return err
	}
	if isBlockInOrphanResolutionRange {
		// A fresh node (virtual selected parent == genesis) must IBD first;
		// orphan resolution on top of bare genesis is disallowed on such nets.
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", blockHash)
				return nil
			}
		}

		log.Debugf("Block %s is within orphan resolution range. "+
			"Adding it to the orphan set", blockHash)
		flow.AddOrphan(block)
		log.Debugf("Requesting block %s missing ancestors", blockHash)
		return flow.AddOrphanRootsToQueue(blockHash)
	}

	// Start IBD unless we already are in IBD
	log.Debugf("Block %s is out of orphan resolution range. "+
		"Attempting to start IBD against it.", blockHash)

	// Send the block to IBD flow via the IBDRequestChannel.
	// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
	select {
	case flow.peer.IBDRequestChannel() <- block:
	default:
	}
	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
|
||||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
|
|
||||||
parents := block.Header.DirectParents()
|
|
||||||
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
	err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
	if err != nil {
		return false, err
	}

	blockLocatorHashes, err := flow.receiveBlockLocator()
	if err != nil {
		return false, err
	}
	// If any locator hash is a block we already have (with a body), the gap is
	// small enough for unorphaning.
	for _, blockLocatorHash := range blockLocatorHashes {
		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
		if err != nil {
			return false, err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			return true, nil
		}
	}
	return false, nil
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
|
|
||||||
orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !orphanExists {
|
|
||||||
log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
|
|
||||||
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
|
||||||
|
|
||||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
|
||||||
for i, root := range orphanRoots {
|
|
||||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
|
||||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
|
||||||
}
|
|
||||||
|
|
||||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,75 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
	// Domain provides access to the node's consensus domain.
	Domain() domain.Domain
}
|
|
||||||
|
|
||||||
// handleRequestBlockLocatorFlow holds the state of a single
// HandleRequestBlockLocator invocation: its context and the routes used to
// exchange messages with the peer.
type handleRequestBlockLocatorFlow struct {
	RequestBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}
|
|
||||||
|
|
||||||
// HandleRequestBlockLocator handles getBlockLocator messages
|
|
||||||
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
|
|
||||||
outgoingRoute *router.Route) error {
|
|
||||||
|
|
||||||
flow := &handleRequestBlockLocatorFlow{
|
|
||||||
RequestBlockLocatorContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
}
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
// start loops forever, answering each incoming getBlockLocator request with a
// block locator built between the pruning point and the requested high hash.
// Failure to build a non-empty locator is treated as a ban-worthy protocol
// error.
func (flow *handleRequestBlockLocatorFlow) start() error {
	for {
		highHash, limit, err := flow.receiveGetBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)

		locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
		if err != nil || len(locator) == 0 {
			if err != nil {
				log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
			}
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between the pruning point and %s", highHash)
		}

		err = flow.sendBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
|
|
||||||
|
|
||||||
message, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
|
|
||||||
|
|
||||||
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
|
|
||||||
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
|
|
||||||
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,140 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
	// Domain provides access to the node's consensus domain.
	Domain() domain.Domain
}
|
|
||||||
|
|
||||||
// handleRequestPruningPointUTXOSetFlow holds the state of a single
// HandleRequestPruningPointUTXOSet invocation: its context and the routes used
// to exchange messages with the peer.
type handleRequestPruningPointUTXOSetFlow struct {
	HandleRequestPruningPointUTXOSetContext
	incomingRoute, outgoingRoute *router.Route
}
|
|
||||||
|
|
||||||
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
|
|
||||||
// the pruning point UTXO set and block body.
|
|
||||||
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
|
|
||||||
outgoingRoute *router.Route) error {
|
|
||||||
|
|
||||||
flow := &handleRequestPruningPointUTXOSetFlow{
|
|
||||||
HandleRequestPruningPointUTXOSetContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
}
|
|
||||||
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
|
|
||||||
for {
|
|
||||||
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
|
|
||||||
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
|
||||||
|
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
|
|
||||||
defer onEnd()
|
|
||||||
|
|
||||||
log.Debugf("Got request for pruning point UTXO set")
|
|
||||||
|
|
||||||
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
|
|
||||||
*appmessage.MsgRequestPruningPointUTXOSet, error) {
|
|
||||||
|
|
||||||
message, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
|
|
||||||
if !ok {
|
|
||||||
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
|
||||||
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
|
|
||||||
}
|
|
||||||
return msgRequestPruningPointUTXOSet, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
|
|
||||||
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
|
||||||
|
|
||||||
// Send the UTXO set in `step`-sized chunks
|
|
||||||
const step = 1000
|
|
||||||
var fromOutpoint *externalapi.DomainOutpoint
|
|
||||||
chunksSent := 0
|
|
||||||
for {
|
|
||||||
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
|
|
||||||
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Retrieved %d UTXOs for pruning block %s",
|
|
||||||
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
|
|
||||||
|
|
||||||
outpointAndUTXOEntryPairs :=
|
|
||||||
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
finished := len(pruningPointUTXOs) < step
|
|
||||||
if finished && chunksSent%ibdBatchSize != 0 {
|
|
||||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
|
||||||
msgRequestPruningPointUTXOSet.PruningPointHash)
|
|
||||||
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pruningPointUTXOs) > 0 {
|
|
||||||
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
|
|
||||||
}
|
|
||||||
chunksSent++
|
|
||||||
|
|
||||||
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
|
|
||||||
if chunksSent%ibdBatchSize == 0 {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
|
|
||||||
if !ok {
|
|
||||||
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
|
||||||
return protocolerrors.Errorf(false, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
|
|
||||||
}
|
|
||||||
|
|
||||||
if finished {
|
|
||||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
|
||||||
msgRequestPruningPointUTXOSet.PruningPointHash)
|
|
||||||
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,577 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
	// Domain provides access to the node's consensus domain.
	Domain() domain.Domain
	// Config returns the node's configuration.
	Config() *config.Config
	// OnNewBlock is invoked after a block has been accepted into the DAG.
	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
	// OnVirtualChange is invoked when the virtual block changes.
	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
	// OnPruningPointUTXOSetOverride is invoked when the pruning point UTXO set is overridden.
	OnPruningPointUTXOSetOverride() error
	// IsIBDRunning reports whether an IBD session is currently in progress.
	IsIBDRunning() bool
	// TrySetIBDRunning attempts to mark IBD as running with the given peer,
	// returning false if IBD was already running.
	TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
	// UnsetIBDRunning marks IBD as no longer running.
	UnsetIBDRunning()
	// IsRecoverableError reports whether the given error is recoverable.
	IsRecoverableError(err error) bool
}
|
|
||||||
|
|
||||||
// handleIBDFlow holds the state of a single HandleIBD invocation: its context,
// the peer being synced from, and the routes used to exchange messages with it.
type handleIBDFlow struct {
	IBDContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
}
|
|
||||||
|
|
||||||
// HandleIBD handles IBD
|
|
||||||
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
|
||||||
peer *peerpkg.Peer) error {
|
|
||||||
|
|
||||||
flow := &handleIBDFlow{
|
|
||||||
IBDContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
peer: peer,
|
|
||||||
}
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) start() error {
|
|
||||||
for {
|
|
||||||
// Wait for IBD requests triggered by other flows
|
|
||||||
block, ok := <-flow.peer.IBDRequestChannel()
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
err := flow.runIBDIfNotRunning(block)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// runIBDIfNotRunning performs a full IBD session against the peer, with the
// given block's hash as the sync target. If IBD is already running it returns
// immediately. The IBD-running flag is always cleared on exit, and the outcome
// (success/interrupted) is logged.
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
	// Only one IBD session may run at a time.
	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
	if !wasIBDNotRunning {
		log.Debugf("IBD is already running")
		return nil
	}

	isFinishedSuccessfully := false
	defer func() {
		flow.UnsetIBDRunning()
		flow.logIBDFinished(isFinishedSuccessfully)
	}()

	highHash := consensushashing.BlockHash(block)
	log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
	log.Debugf("Syncing blocks up to %s", highHash)
	log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
	highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
	if err != nil {
		return err
	}
	log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
	if err != nil {
		return err
	}

	if !shouldSync {
		return nil
	}

	if shouldDownloadHeadersProof {
		log.Infof("Starting IBD with headers proof")
		err := flow.ibdWithHeadersProof(highHash)
		if err != nil {
			return err
		}
	} else {
		// A fresh node (virtual selected parent == genesis) may only IBD via
		// the headers-proof path on nets that disallow blocks on top of genesis.
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", highHash)
				return nil
			}
		}

		err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
		if err != nil {
			return err
		}
	}

	err = flow.syncMissingBlockBodies(highHash)
	if err != nil {
		return err
	}

	log.Debugf("Finished syncing blocks up to %s", highHash)
	isFinishedSuccessfully = true
	return nil
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
|
||||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
|
||||||
successString := "successfully"
|
|
||||||
if !isFinishedSuccessfully {
|
|
||||||
successString = "(interrupted)"
|
|
||||||
}
|
|
||||||
log.Infof("IBD finished %s", successString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
	targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {

	log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
	blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
	if err != nil {
		return nil, false, err
	}

	// Binary-search-like zoom-in: repeatedly ask the peer for the highest hash
	// it knows in the locator, then build a narrower locator around it.
	for {
		highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
		if err != nil {
			return nil, false, err
		}
		if !highestHashFound {
			// Conflicting pruning points - stop IBD gracefully.
			return nil, false, nil
		}
		highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
		if err != nil {
			return nil, false, err
		}

		if highestHashIndex == 0 ||
			// If the block locator contains only two adjacent chain blocks, the
			// syncer will always find the same highest chain block, so to avoid
			// an endless loop, we explicitly stop the loop in such situation.
			(len(blockLocator) == 2 && highestHashIndex == 1) {

			return highestHash, true, nil
		}

		locatorHashAboveHighestHash := highestHash
		if highestHashIndex > 0 {
			locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
		}

		blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
		if err != nil {
			return nil, false, err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
|
||||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
|
||||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
|
||||||
"restarting with full block locator")
|
|
||||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return blockLocator, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
|
||||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
|
||||||
|
|
||||||
highestHashIndex := 0
|
|
||||||
highestHashIndexFound := false
|
|
||||||
for i, blockLocatorHash := range blockLocator {
|
|
||||||
if highestHash.Equal(blockLocatorHash) {
|
|
||||||
highestHashIndex = i
|
|
||||||
highestHashIndexFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !highestHashIndexFound {
|
|
||||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
|
||||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
|
||||||
}
|
|
||||||
log.Debugf("The index of the highest hash in the original "+
|
|
||||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
|
||||||
|
|
||||||
return highestHashIndex, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
|
||||||
// blockLocator. This method may fail because the peer and us have conflicting pruning
|
|
||||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
|
||||||
func (flow *handleIBDFlow) fetchHighestHash(
|
|
||||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
|
||||||
|
|
||||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
|
||||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
|
||||||
highestHash := message.HighestHash
|
|
||||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
|
||||||
|
|
||||||
return highestHash, true, nil
|
|
||||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
|
||||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
|
||||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
|
||||||
return nil, false, nil
|
|
||||||
default:
|
|
||||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncPruningPointFutureHeaders downloads all headers between highestSharedBlockHash
// and highHash from the peer and inserts them into the given consensus.
// Downloading is pipelined: a dedicated goroutine receives header messages from the
// peer while this goroutine validates and inserts them, so insertion never stalls
// waiting on the network.
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
	highHash *externalapi.DomainHash) error {

	log.Infof("Downloading headers from %s", flow.peer)

	err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
	if err != nil {
		return err
	}

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				// Closing the channel tells the consumer loop below that the
				// peer signaled it has sent all requested headers.
				close(blockHeadersMessageChan)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			// Request the next batch only after handing off the current one,
			// keeping at most a small number of batches in flight.
			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				// If the highHash has not been received, the peer is misbehaving
				highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
				if err != nil {
					return err
				}
				if !highHashBlockInfo.Exists {
					return protocolerrors.Errorf(true, "did not receive "+
						"highHash block %s from peer %s during block download", highHash, flow.peer)
				}
				return nil
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}
		case err := <-errChan:
			return err
		}
	}
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
|
||||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
|
||||||
|
|
||||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
|
||||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.BlockHeadersMessage:
|
|
||||||
return message, false, nil
|
|
||||||
case *appmessage.MsgDoneHeaders:
|
|
||||||
return nil, true, nil
|
|
||||||
default:
|
|
||||||
return nil, false,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s, got: %s",
|
|
||||||
appmessage.CmdBlockHeaders,
|
|
||||||
appmessage.CmdDoneHeaders,
|
|
||||||
message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
|
|
||||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
|
||||||
block := &externalapi.DomainBlock{
|
|
||||||
Header: header,
|
|
||||||
Transactions: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
blockHash := consensushashing.BlockHash(block)
|
|
||||||
blockInfo, err := consensus.GetBlockInfo(blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if blockInfo.Exists {
|
|
||||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
_, err = consensus.ValidateAndInsertBlock(block, false)
|
|
||||||
if err != nil {
|
|
||||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
|
||||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
|
||||||
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
|
|
||||||
} else {
|
|
||||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
|
||||||
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
|
|
||||||
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
|
|
||||||
|
|
||||||
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
|
|
||||||
|
|
||||||
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
|
|
||||||
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
|
|
||||||
"tip is smaller than the current selected tip")
|
|
||||||
}
|
|
||||||
|
|
||||||
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
|
|
||||||
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
|
|
||||||
return protocolerrors.Errorf(false, "difference between the timestamps of "+
|
|
||||||
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
|
|
||||||
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
|
|
||||||
|
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
|
|
||||||
defer onEnd()
|
|
||||||
|
|
||||||
receivedChunkCount := 0
|
|
||||||
receivedUTXOCount := 0
|
|
||||||
for {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgPruningPointUTXOSetChunk:
|
|
||||||
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
|
|
||||||
domainOutpointAndUTXOEntryPairs :=
|
|
||||||
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
|
|
||||||
|
|
||||||
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
receivedChunkCount++
|
|
||||||
if receivedChunkCount%ibdBatchSize == 0 {
|
|
||||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
|
||||||
receivedChunkCount, receivedUTXOCount)
|
|
||||||
|
|
||||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
|
||||||
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case *appmessage.MsgDonePruningPointUTXOSetChunks:
|
|
||||||
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
|
|
||||||
return true, nil
|
|
||||||
|
|
||||||
case *appmessage.MsgUnexpectedPruningPoint:
|
|
||||||
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
|
|
||||||
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
|
|
||||||
return false, nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
|
|
||||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncMissingBlockBodies downloads the bodies of all blocks below highHash that are
// currently known header-only, validates and inserts each one, and finally resolves
// the virtual. Bodies are requested in batches of ibdBatchSize and are expected back
// in the exact order requested.
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		// Take the next batch of at most ibdBatchSize hashes.
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		// The peer must answer with the requested blocks in the order requested;
		// any deviation (wrong type, wrong hash) is a banning offense.
		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			// We asked for bodies; a header-only block here means the peer is misbehaving.
			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					// The block arrived through another path (e.g. relay) meanwhile.
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block, virtualChangeSet)
			if err != nil {
				return err
			}
		}
	}

	return flow.resolveVirtual()
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
|
||||||
if len(block.Transactions) == 0 {
|
|
||||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
|
||||||
consensushashing.BlockHash(block))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleIBDFlow) resolveVirtual() error {
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
if i%10 == 0 {
|
|
||||||
log.Infof("Resolving virtual. This may take some time...")
|
|
||||||
}
|
|
||||||
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.OnVirtualChange(virtualChangeSet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isCompletelyResolved {
|
|
||||||
log.Infof("Resolved virtual")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,9 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
"github.com/kaspanet/kaspad/util/panics"
|
|
||||||
)
|
|
||||||
|
|
||||||
var log = logger.RegisterSubSystem("PROT")
|
|
||||||
var spawn = panics.GoroutineWrapperFunc(log)
|
|
@ -1,35 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
|
|
||||||
type SendVirtualSelectedParentInvContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
Config() *config.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
|
|
||||||
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
|
|
||||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
|
||||||
|
|
||||||
virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
|
|
||||||
log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
|
|
||||||
|
|
||||||
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
|
|
||||||
return outgoingRoute.Enqueue(virtualSelectedParentInv)
|
|
||||||
}
|
|
16
app/protocol/flows/v5/blockrelay/batch_size_test.go
Normal file
16
app/protocol/flows/v5/blockrelay/batch_size_test.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIBDBatchSizeLessThanRouteCapacity sanity-checks the relation between the
// IBD batch size and the router's per-route message capacity.
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
	if ibdBatchSize >= router.DefaultMaxMessages {
		t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
			ibdBatchSize, router.DefaultMaxMessages)
	}
}
|
@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex
|
|||||||
|
|
||||||
switch message := message.(type) {
|
switch message := message.(type) {
|
||||||
case *appmessage.MsgInvRelayBlock:
|
case *appmessage.MsgInvRelayBlock:
|
||||||
flow.invsQueue = append(flow.invsQueue, message)
|
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||||
case *appmessage.MsgBlockLocator:
|
case *appmessage.MsgBlockLocator:
|
||||||
return message.BlockLocatorHashes, nil
|
return message.BlockLocatorHashes, nil
|
||||||
default:
|
default:
|
@ -5,7 +5,6 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !blockInfo.Exists {
|
if !blockInfo.HasHeader() {
|
||||||
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
||||||
"with an unknown targetHash %s", targetHash)
|
"with an unknown targetHash %s", targetHash)
|
||||||
}
|
}
|
||||||
@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The IBD block locator is checking only existing blocks with bodies.
|
// The IBD block locator is checking only existing blocks with bodies.
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
if !blockInfo.HasBody() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -4,7 +4,6 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@ -28,18 +27,15 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
|
|||||||
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
||||||
for i, hash := range msgRequestIBDBlocks.Hashes {
|
for i, hash := range msgRequestIBDBlocks.Hashes {
|
||||||
// Fetch the block from the database.
|
// Fetch the block from the database.
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
|
||||||
|
}
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
// TODO (Partial nodes): Convert block to partial block if needed
|
||||||
|
|
||||||
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
@ -0,0 +1,85 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
	Domain() domain.Domain
}

// handleRequestIBDChainBlockLocatorFlow bundles the flow context with the
// incoming and outgoing routes used to talk to the requesting peer.
type handleRequestIBDChainBlockLocatorFlow struct {
	RequestIBDChainBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestIBDChainBlockLocatorFlow{
		RequestIBDChainBlockLocatorContext: context,
		incomingRoute:                      incomingRoute,
		outgoingRoute:                      outgoingRoute,
	}
	return flow.start()
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
|
||||||
|
for {
|
||||||
|
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
|
||||||
|
|
||||||
|
var locator externalapi.BlockLocator
|
||||||
|
if highHash == nil || lowHash == nil {
|
||||||
|
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||||
|
} else {
|
||||||
|
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||||
|
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||||
|
// The chain has been modified, signal it by sending an empty locator
|
||||||
|
locator, err = externalapi.BlockLocator{}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
|
||||||
|
return protocolerrors.Errorf(true, "couldn't build a block "+
|
||||||
|
"locator between %s and %s", lowHash, highHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.sendIBDChainBlockLocator(locator)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
|
||||||
|
|
||||||
|
message, err := flow.incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
|
||||||
|
|
||||||
|
return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
|
||||||
|
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
|
||||||
|
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@ -118,16 +118,33 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, blockHash := range pointAndItsAnticone {
|
for i, blockHash := range pointAndItsAnticone {
|
||||||
block, err := context.Domain().Consensus().GetBlock(blockHash)
|
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
|
||||||
|
}
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
|
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (i+1)%ibdBatchSize == 0 {
|
||||||
|
// No timeout here, as we don't care if the syncee takes its time computing,
|
||||||
|
// since it only blocks this dedicated flow
|
||||||
|
message, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
|
||||||
|
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
|
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
|
@ -5,7 +5,6 @@ import (
|
|||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@ -29,18 +28,15 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
|
|||||||
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
||||||
for _, hash := range getRelayBlocksMessage.Hashes {
|
for _, hash := range getRelayBlocksMessage.Hashes {
|
||||||
// Fetch the block from the database.
|
// Fetch the block from the database.
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
|
||||||
|
}
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
// TODO (Partial nodes): Convert block to partial block if needed
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
@ -7,9 +7,11 @@ import (
|
|||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -24,8 +26,8 @@ var orphanResolutionRange uint32 = 5
|
|||||||
type RelayInvsContext interface {
|
type RelayInvsContext interface {
|
||||||
Domain() domain.Domain
|
Domain() domain.Domain
|
||||||
Config() *config.Config
|
Config() *config.Config
|
||||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
OnNewBlock(block *externalapi.DomainBlock) error
|
||||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
OnNewBlockTemplate() error
|
||||||
OnPruningPointUTXOSetOverride() error
|
OnPruningPointUTXOSetOverride() error
|
||||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||||
Broadcast(message appmessage.Message) error
|
Broadcast(message appmessage.Message) error
|
||||||
@ -34,13 +36,19 @@ type RelayInvsContext interface {
|
|||||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||||
IsIBDRunning() bool
|
IsIBDRunning() bool
|
||||||
IsRecoverableError(err error) bool
|
IsRecoverableError(err error) bool
|
||||||
|
IsNearlySynced() (bool, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type invRelayBlock struct {
|
||||||
|
Hash *externalapi.DomainHash
|
||||||
|
IsOrphanRoot bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type handleRelayInvsFlow struct {
|
type handleRelayInvsFlow struct {
|
||||||
RelayInvsContext
|
RelayInvsContext
|
||||||
incomingRoute, outgoingRoute *router.Route
|
incomingRoute, outgoingRoute *router.Route
|
||||||
peer *peerpkg.Peer
|
peer *peerpkg.Peer
|
||||||
invsQueue []*appmessage.MsgInvRelayBlock
|
invsQueue []invRelayBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||||
@ -53,7 +61,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
|
|||||||
incomingRoute: incomingRoute,
|
incomingRoute: incomingRoute,
|
||||||
outgoingRoute: outgoingRoute,
|
outgoingRoute: outgoingRoute,
|
||||||
peer: peer,
|
peer: peer,
|
||||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
invsQueue: make([]invRelayBlock, 0),
|
||||||
}
|
}
|
||||||
err := flow.start()
|
err := flow.start()
|
||||||
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||||
@ -104,11 +112,17 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Block relay is disabled during IBD
|
// Block relay is disabled if the node is already during IBD AND considered out of sync
|
||||||
if flow.IsIBDRunning() {
|
if flow.IsIBDRunning() {
|
||||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
isNearlySynced, err := flow.IsNearlySynced()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !isNearlySynced {
|
||||||
|
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("Requesting block %s", inv.Hash)
|
log.Debugf("Requesting block %s", inv.Hash)
|
||||||
block, exists, err := flow.requestBlock(inv.Hash)
|
block, exists, err := flow.requestBlock(inv.Hash)
|
||||||
@ -130,8 +144,36 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
|
||||||
|
// that means the process started by a proper and relevant relay block
|
||||||
|
if !inv.IsOrphanRoot {
|
||||||
|
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
|
||||||
|
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
|
||||||
|
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Since `BlueWork` respects topology, this condition means that the relay
|
||||||
|
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
|
||||||
|
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
|
||||||
|
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
|
||||||
|
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
|
||||||
|
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("Processing block %s", inv.Hash)
|
log.Debugf("Processing block %s", inv.Hash)
|
||||||
missingParents, virtualChangeSet, err := flow.processBlock(block)
|
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
missingParents, err := flow.processBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||||
@ -153,13 +195,48 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("Relaying block %s", inv.Hash)
|
oldVirtualParents := hashset.New()
|
||||||
|
for _, parent := range oldVirtualInfo.ParentHashes {
|
||||||
|
oldVirtualParents.Add(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
virtualHasNewParents := false
|
||||||
|
for _, parent := range newVirtualInfo.ParentHashes {
|
||||||
|
if oldVirtualParents.Contains(parent) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
virtualHasNewParents = true
|
||||||
|
block, found, err := flow.Domain().Consensus().GetBlock(parent)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
|
||||||
|
}
|
||||||
|
blockHash := consensushashing.BlockHash(block)
|
||||||
|
log.Debugf("Relaying block %s", blockHash)
|
||||||
err = flow.relayBlock(block)
|
err = flow.relayBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if virtualHasNewParents {
|
||||||
|
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
|
||||||
|
err = flow.OnNewBlockTemplate()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
err = flow.OnNewBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -175,24 +252,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
|
||||||
if len(flow.invsQueue) > 0 {
|
if len(flow.invsQueue) > 0 {
|
||||||
var inv *appmessage.MsgInvRelayBlock
|
var inv invRelayBlock
|
||||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||||
return inv, nil
|
return inv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
msg, err := flow.incomingRoute.Dequeue()
|
msg, err := flow.incomingRoute.Dequeue()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return invRelayBlock{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||||
"expecting an inv message", msg.Command())
|
"expecting an inv message", msg.Command())
|
||||||
}
|
}
|
||||||
return inv, nil
|
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||||
@ -237,7 +314,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
|||||||
|
|
||||||
switch message := message.(type) {
|
switch message := message.(type) {
|
||||||
case *appmessage.MsgInvRelayBlock:
|
case *appmessage.MsgInvRelayBlock:
|
||||||
flow.invsQueue = append(flow.invsQueue, message)
|
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||||
case *appmessage.MsgBlock:
|
case *appmessage.MsgBlock:
|
||||||
return message, nil
|
return message, nil
|
||||||
default:
|
default:
|
||||||
@ -246,22 +323,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
|
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
|
||||||
blockHash := consensushashing.BlockHash(block)
|
blockHash := consensushashing.BlockHash(block)
|
||||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||||
if errors.As(err, missingParentsError) {
|
if errors.As(err, missingParentsError) {
|
||||||
return missingParentsError.MissingParentHashes, nil, nil
|
return missingParentsError.MissingParentHashes, nil
|
||||||
}
|
}
|
||||||
|
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
|
||||||
|
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||||
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
|
||||||
}
|
}
|
||||||
return nil, virtualChangeSet, nil
|
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||||
@ -369,12 +449,16 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
|
|||||||
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(orphanRoots) == 0 {
|
||||||
|
// In some rare cases we get here when there are no orphan roots already
|
||||||
|
return nil
|
||||||
|
}
|
||||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
||||||
|
|
||||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
invMessages := make([]invRelayBlock, len(orphanRoots))
|
||||||
for i, root := range orphanRoots {
|
for i, root := range orphanRoots {
|
||||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
||||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
|
||||||
}
|
}
|
||||||
|
|
||||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||||
|
type RequestAnticoneContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
Config() *config.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleRequestAnticoneFlow struct {
|
||||||
|
RequestAnticoneContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
peer *peer.Peer
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRequestAnticone handles RequestAnticone messages
|
||||||
|
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
|
||||||
|
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||||
|
|
||||||
|
flow := &handleRequestAnticoneFlow{
|
||||||
|
RequestAnticoneContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestAnticoneFlow) start() error {
|
||||||
|
for {
|
||||||
|
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
|
||||||
|
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
|
||||||
|
|
||||||
|
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
|
||||||
|
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
|
||||||
|
// we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
|
||||||
|
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
|
||||||
|
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
|
||||||
|
if err != nil {
|
||||||
|
return protocolerrors.Wrap(true, err, "Failed querying anticone")
|
||||||
|
}
|
||||||
|
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
|
||||||
|
|
||||||
|
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||||
|
for i, blockHash := range blockHashes {
|
||||||
|
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We sort the headers in bottom-up topological order before sending
|
||||||
|
sort.Slice(blockHeaders, func(i, j int) bool {
|
||||||
|
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
|
||||||
|
})
|
||||||
|
|
||||||
|
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
|
||||||
|
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
|
||||||
|
contextHash *externalapi.DomainHash, err error) {
|
||||||
|
|
||||||
|
message, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
|
||||||
|
|
||||||
|
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
|
||||||
|
}
|
@ -10,7 +10,9 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
)
|
)
|
||||||
|
|
||||||
const ibdBatchSize = router.DefaultMaxMessages
|
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
|
||||||
|
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
|
||||||
|
const ibdBatchSize = 99
|
||||||
|
|
||||||
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
|
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||||
type RequestHeadersContext interface {
|
type RequestHeadersContext interface {
|
||||||
@ -42,7 +44,34 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||||
|
|
||||||
|
consensus := flow.Domain().Consensus()
|
||||||
|
|
||||||
|
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !lowHashInfo.HasHeader() {
|
||||||
|
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
highHashInfo, err := consensus.GetBlockInfo(highHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !highHashInfo.HasHeader() {
|
||||||
|
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !isLowSelectedAncestorOfHigh {
|
||||||
|
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
|
||||||
|
lowHash, highHash)
|
||||||
|
}
|
||||||
|
|
||||||
for !lowHash.Equal(highHash) {
|
for !lowHash.Equal(highHash) {
|
||||||
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
||||||
@ -51,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
// in order to avoid locking the consensus for too long
|
// in order to avoid locking the consensus for too long
|
||||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
||||||
const maxBlocks = 1 << 10
|
const maxBlocks = 1 << 10
|
||||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -59,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
|
|
||||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||||
for i, blockHash := range blockHashes {
|
for i, blockHash := range blockHashes {
|
||||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
blockHeader, err := consensus.GetBlockHeader(blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
751
app/protocol/flows/v5/blockrelay/ibd.go
Normal file
751
app/protocol/flows/v5/blockrelay/ibd.go
Normal file
@ -0,0 +1,751 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IBDContext is the interface for the context needed for the HandleIBD flow.
|
||||||
|
type IBDContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
Config() *config.Config
|
||||||
|
OnNewBlock(block *externalapi.DomainBlock) error
|
||||||
|
OnNewBlockTemplate() error
|
||||||
|
OnPruningPointUTXOSetOverride() error
|
||||||
|
IsIBDRunning() bool
|
||||||
|
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||||
|
UnsetIBDRunning()
|
||||||
|
IsRecoverableError(err error) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleIBDFlow struct {
|
||||||
|
IBDContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
peer *peerpkg.Peer
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleIBD handles IBD
|
||||||
|
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||||
|
peer *peerpkg.Peer) error {
|
||||||
|
|
||||||
|
flow := &handleIBDFlow{
|
||||||
|
IBDContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) start() error {
|
||||||
|
for {
|
||||||
|
// Wait for IBD requests triggered by other flows
|
||||||
|
block, ok := <-flow.peer.IBDRequestChannel()
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err := flow.runIBDIfNotRunning(block)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
|
||||||
|
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
|
||||||
|
if !wasIBDNotRunning {
|
||||||
|
log.Debugf("IBD is already running")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
isFinishedSuccessfully := false
|
||||||
|
var err error
|
||||||
|
defer func() {
|
||||||
|
flow.UnsetIBDRunning()
|
||||||
|
flow.logIBDFinished(isFinishedSuccessfully, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
relayBlockHash := consensushashing.BlockHash(block)
|
||||||
|
|
||||||
|
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||||
|
log.Infof("Syncing blocks up to %s", relayBlockHash)
|
||||||
|
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||||
|
|
||||||
|
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
|
||||||
|
block, highestKnownSyncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !shouldSync {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if shouldDownloadHeadersProof {
|
||||||
|
log.Infof("Starting IBD with headers proof")
|
||||||
|
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
|
||||||
|
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGenesisVirtualSelectedParent {
|
||||||
|
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
|
||||||
|
"to the recent pruning point before normal operation can resume.", relayBlockHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.syncPruningPointFutureHeaders(
|
||||||
|
flow.Domain().Consensus(),
|
||||||
|
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We start by syncing missing bodies over the syncer selected chain
|
||||||
|
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Relay block might be in the anticone of syncer selected tip, thus
|
||||||
|
// check his chain for missing bodies as well.
|
||||||
|
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
|
||||||
|
// is in syncer virtual mergeset which has bounded size.
|
||||||
|
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||||
|
err = flow.syncMissingBlockBodies(relayBlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
|
||||||
|
isFinishedSuccessfully = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
|
||||||
|
/*
|
||||||
|
Algorithm:
|
||||||
|
Request full selected chain block locator from syncer
|
||||||
|
Find the highest block which we know
|
||||||
|
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Empty hashes indicate that the full chain is queried
|
||||||
|
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) == 0 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||||
|
"to contain at least one element")
|
||||||
|
}
|
||||||
|
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||||
|
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||||
|
chainNegotiationRestartCounter := 0
|
||||||
|
chainNegotiationZoomCounts := 0
|
||||||
|
initialLocatorLen := len(locatorHashes)
|
||||||
|
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||||
|
for _, syncerChainHash := range locatorHashes {
|
||||||
|
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if info.Exists {
|
||||||
|
if info.BlockStatus == externalapi.StatusInvalid {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're only interested in syncer chain blocks that have our pruning
|
||||||
|
// point in their selected chain. Otherwise, it means one of the following:
|
||||||
|
// 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality
|
||||||
|
// (hence we can ignore it unless merged by others).
|
||||||
|
// 2) syncerChainHash is actually in the past of our pruning point so there's no
|
||||||
|
// point in syncing from it.
|
||||||
|
if err == nil && isPruningPointOnSyncerChain {
|
||||||
|
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lowestUnknownSyncerChainHash = syncerChainHash
|
||||||
|
}
|
||||||
|
// No unknown blocks, break. Note this can only happen in the first iteration
|
||||||
|
if lowestUnknownSyncerChainHash == nil {
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// No shared block, break
|
||||||
|
if currentHighestKnownSyncerChainHash == nil {
|
||||||
|
highestKnownSyncerChainHash = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// No point in zooming further
|
||||||
|
if len(locatorHashes) == 1 {
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Zoom in
|
||||||
|
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||||
|
lowestUnknownSyncerChainHash,
|
||||||
|
currentHighestKnownSyncerChainHash, time.Second*10)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) > 0 {
|
||||||
|
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||||
|
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||||
|
"hashes to match the locator bounds")
|
||||||
|
}
|
||||||
|
|
||||||
|
chainNegotiationZoomCounts++
|
||||||
|
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
|
||||||
|
if len(locatorHashes) == 2 {
|
||||||
|
// We found our search target
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if chainNegotiationZoomCounts > initialLocatorLen*2 {
|
||||||
|
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
|
||||||
|
// expected to decrease in size at least every two iterations
|
||||||
|
return nil, nil, protocolerrors.Errorf(true,
|
||||||
|
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
|
||||||
|
chainNegotiationZoomCounts, initialLocatorLen)
|
||||||
|
}
|
||||||
|
|
||||||
|
} else { // Empty locator signals a restart due to chain changes
|
||||||
|
chainNegotiationZoomCounts = 0
|
||||||
|
chainNegotiationRestartCounter++
|
||||||
|
if chainNegotiationRestartCounter > 32 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(false,
|
||||||
|
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||||
|
}
|
||||||
|
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
|
||||||
|
|
||||||
|
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||||
|
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
|
||||||
|
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) == 0 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||||
|
"to contain at least one element")
|
||||||
|
}
|
||||||
|
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
|
||||||
|
initialLocatorLen = len(locatorHashes)
|
||||||
|
// Reset syncer's header selected tip
|
||||||
|
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Found highest known syncer chain block %s from peer %s",
|
||||||
|
highestKnownSyncerChainHash, flow.peer)
|
||||||
|
|
||||||
|
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||||
|
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
|
||||||
|
successString := "successfully"
|
||||||
|
if !isFinishedSuccessfully {
|
||||||
|
if err != nil {
|
||||||
|
successString = fmt.Sprintf("(interrupted: %s)", err)
|
||||||
|
} else {
|
||||||
|
successString = fmt.Sprintf("(interrupted)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||||
|
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||||
|
|
||||||
|
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||||
|
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch message := message.(type) {
|
||||||
|
case *appmessage.MsgIBDChainBlockLocator:
|
||||||
|
if len(message.BlockLocatorHashes) > 64 {
|
||||||
|
return nil, protocolerrors.Errorf(true,
|
||||||
|
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||||
|
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||||
|
len(message.BlockLocatorHashes))
|
||||||
|
}
|
||||||
|
return message.BlockLocatorHashes, nil
|
||||||
|
default:
|
||||||
|
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncPruningPointFutureHeaders downloads all headers between
// highestKnownSyncerChainHash and the syncer's header selected tip into the
// given consensus, then syncs the relay block's past via syncMissingRelayPast.
// Header chunks are received on a background goroutine while this goroutine
// validates and inserts them, so network reception and processing overlap.
// highBlockDAAScoreHint is used only for progress reporting.
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
	syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
	highBlockDAAScoreHint uint64) error {

	log.Infof("Downloading headers from %s", flow.peer)

	if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
		// No need to get syncer selected tip headers, so sync relay past and return
		return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
	}

	err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
	if err != nil {
		return err
	}

	highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				// Closing the channel is the termination signal for the consumer loop below
				close(blockHeadersMessageChan)
				return
			}
			if len(blockHeadersMessage.BlockHeaders) == 0 {
				// The syncer should have sent a done message if the search completed, and not an empty list
				errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			// Request the next chunk before the current one is fully processed
			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				// Channel closed: all headers received, proceed to the relay block's past
				return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}

			lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
			progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
		case err := <-errChan:
			return err
		}
	}
}
|
||||||
|
|
||||||
|
// syncMissingRelayPast completes the header download stage: after headers up
// to the syncer's selected tip were synced, it makes sure the header of the
// triggering relay block (and the anticone headers requested via
// sendRequestAnticone) are obtained as well. The anticone is expected to
// arrive as exactly one header chunk followed by a done message; anything
// else, or a still-missing relay block afterwards, is a banning protocol error.
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
	// Finished downloading syncer selected tip blocks,
	// check if we already have the triggering relayBlockHash
	relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		// Send a special header request for the selected tip anticone. This is expected to
		// be a small set, as it is bounded to the size of virtual's mergeset.
		err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
		if err != nil {
			return err
		}
		anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
		if err != nil {
			return err
		}
		if anticoneDone {
			// A done signal before any header chunk means an empty anticone,
			// which contradicts the relay block being missing
			return protocolerrors.Errorf(true,
				"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		// The anticone must fit in a single chunk, so the next message must be the done signal
		_, anticoneDone, err = flow.receiveHeaders()
		if err != nil {
			return err
		}
		if !anticoneDone {
			return protocolerrors.Errorf(true,
				"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		for _, header := range anticoneHeadersMessage.BlockHeaders {
			err = flow.processHeader(consensus, header)
			if err != nil {
				return err
			}
		}
	}

	// If the relayBlockHash has still not been received, the peer is misbehaving
	relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		return protocolerrors.Errorf(true, "did not receive "+
			"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) sendRequestAnticone(
|
||||||
|
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
|
||||||
|
|
||||||
|
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||||
|
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) sendRequestHeaders(
|
||||||
|
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
|
||||||
|
|
||||||
|
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||||
|
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
switch message := message.(type) {
|
||||||
|
case *appmessage.BlockHeadersMessage:
|
||||||
|
return message, false, nil
|
||||||
|
case *appmessage.MsgDoneHeaders:
|
||||||
|
return nil, true, nil
|
||||||
|
default:
|
||||||
|
return nil, false,
|
||||||
|
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s or %s, got: %s",
|
||||||
|
appmessage.CmdBlockHeaders,
|
||||||
|
appmessage.CmdDoneHeaders,
|
||||||
|
message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// processHeader validates and inserts a single header into the given
// consensus as a headers-only block (no transactions). Headers already in
// the DAG, or rejected with ErrDuplicateBlock, are skipped without error.
// Other rule errors become banning protocol errors; non-rule errors are
// wrapped and returned as internal failures.
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	// Headers-only block: body intentionally nil
	block := &externalapi.DomainBlock{
		Header:       header,
		Transactions: nil,
	}

	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
		return err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
		return nil
	}
	err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		// A non-rule error indicates an internal problem rather than a misbehaving peer
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			// Duplicates are benign during IBD
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// validatePruningPointFutureHeaderTimestamps compares the headers-selected
// tip of the staging consensus (the candidate downloaded with the pruning
// proof) against the headers-selected tip of the current consensus, and
// rejects the candidate (with a non-banning error) unless its timestamp is
// ahead of the current tip's by at least ten minutes.
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
	headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
	if err != nil {
		return err
	}
	headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

	currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
	if err != nil {
		return err
	}
	currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

	// The candidate must not be behind the current tip at all
	if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
		return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
			"tip is smaller than the current selected tip")
	}

	// And it must be meaningfully ahead, otherwise the IBD is not worth the switch
	minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
	if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
		return protocolerrors.Errorf(false, "difference between the timestamps of "+
			"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
	}
	return nil
}
|
||||||
|
|
||||||
|
// receiveAndInsertPruningPointUTXOSet streams the pruning point UTXO set
// from the peer chunk by chunk, appending each chunk to the given consensus'
// imported pruning point UTXO set. It returns (true, nil) on a completed
// transfer and (false, nil) if the peer reports that pruningPointHash is no
// longer its pruning point (a non-error restart condition).
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			// Every ibdBatchSize chunks, log progress and ask the peer for the next batch
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			// Not a protocol violation: the peer's pruning point may have legitimately moved
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}
|
||||||
|
|
||||||
|
// syncMissingBlockBodies downloads the bodies of all header-only blocks up
// to highHash in batches of ibdBatchSize, validating and inserting each
// block and reporting progress by DAA score. When the node is nearly synced
// the virtual is updated per inserted block; otherwise it is resolved once
// at the end. Finishes by notifying a new block template.
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
	if err != nil {
		return err
	}
	highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
	highestProcessedDAAScore := lowBlockHeader.DAAScore()

	// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
	updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
	if err != nil {
		return err
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		// Take the next batch of at most ibdBatchSize hashes
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		// The peer must answer with exactly the requested blocks, in order
		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			// A bodiless block here is a protocol violation
			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					// Duplicates are benign; skip OnNewBlock for them as well
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block)
			if err != nil {
				return err
			}

			highestProcessedDAAScore = block.Header.DAAScore()
		}

		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
	}

	// We need to resolve virtual only if it wasn't updated while syncing block bodies
	if !updateVirtual {
		err := flow.resolveVirtual(highestProcessedDAAScore)
		if err != nil {
			return err
		}
	}

	return flow.OnNewBlockTemplate()
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||||
|
if len(block.Transactions) == 0 {
|
||||||
|
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||||
|
consensushashing.BlockHash(block))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
|
||||||
|
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
|
||||||
|
var percents int
|
||||||
|
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
|
||||||
|
percents = 100
|
||||||
|
} else {
|
||||||
|
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
|
||||||
|
}
|
||||||
|
if percents < 0 {
|
||||||
|
percents = 0
|
||||||
|
} else if percents > 100 {
|
||||||
|
percents = 100
|
||||||
|
}
|
||||||
|
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Resolved virtual")
|
||||||
|
return nil
|
||||||
|
}
|
45
app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go
Normal file
45
app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
// ibdProgressReporter logs IBD progress as the percentage of a DAA score
// range that has been processed, emitting a line only when the integer
// percentage increases.
type ibdProgressReporter struct {
	lowDAAScore                 uint64 // DAA score at the start of the sync range
	highDAAScore                uint64 // DAA score at the (possibly hinted) end of the range
	objectName                  string // what is being processed, e.g. "block headers" or "blocks"
	totalDAAScoreDifference     uint64 // highDAAScore - lowDAAScore; the percentage denominator
	lastReportedProgressPercent int    // last percentage logged, to avoid duplicate reports
	processed                   int    // total number of objects processed so far
}
|
||||||
|
|
||||||
|
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
|
||||||
|
if highDAAScore <= lowDAAScore {
|
||||||
|
// Avoid a zero or negative diff
|
||||||
|
highDAAScore = lowDAAScore + 1
|
||||||
|
}
|
||||||
|
return &ibdProgressReporter{
|
||||||
|
lowDAAScore: lowDAAScore,
|
||||||
|
highDAAScore: highDAAScore,
|
||||||
|
objectName: objectName,
|
||||||
|
totalDAAScoreDifference: highDAAScore - lowDAAScore,
|
||||||
|
lastReportedProgressPercent: 0,
|
||||||
|
processed: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
|
||||||
|
ipr.processed += processedDelta
|
||||||
|
|
||||||
|
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
|
||||||
|
if highestProcessedDAAScore > ipr.highDAAScore {
|
||||||
|
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
|
||||||
|
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
|
||||||
|
}
|
||||||
|
relativeDAAScore := uint64(0)
|
||||||
|
if highestProcessedDAAScore > ipr.lowDAAScore {
|
||||||
|
// Avoid a negative diff
|
||||||
|
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
|
||||||
|
}
|
||||||
|
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
|
||||||
|
if progressPercent > ipr.lastReportedProgressPercent {
|
||||||
|
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
|
||||||
|
ipr.lastReportedProgressPercent = progressPercent
|
||||||
|
}
|
||||||
|
}
|
@ -9,20 +9,23 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
|
func (flow *handleIBDFlow) ibdWithHeadersProof(
|
||||||
err := flow.Domain().InitStagingConsensus()
|
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||||
|
err := flow.Domain().InitStagingConsensusWithoutGenesis()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = flow.downloadHeadersAndPruningUTXOSet(highHash)
|
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !flow.IsRecoverableError(err) {
|
if !flow.IsRecoverableError(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
|
||||||
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
|
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
|
||||||
if deleteStagingConsensusErr != nil {
|
if deleteStagingConsensusErr != nil {
|
||||||
return deleteStagingConsensusErr
|
return deleteStagingConsensusErr
|
||||||
@ -31,6 +34,8 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
|
||||||
|
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
|
||||||
err = flow.Domain().CommitStagingConsensus()
|
err = flow.Domain().CommitStagingConsensus()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -44,11 +49,34 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
|
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
|
||||||
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
|
relayBlock *externalapi.DomainBlock,
|
||||||
|
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
|
||||||
|
|
||||||
if !highestSharedBlockFound {
|
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
|
||||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
|
if highestKnownSyncerChainHash != nil {
|
||||||
|
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return false, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
highestSharedBlockFound = blockInfo.HasBody()
|
||||||
|
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||||
|
if err != nil {
|
||||||
|
return false, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
|
||||||
|
pruningPoint, highestKnownSyncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return false, false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
|
||||||
|
// we might have here info which is relevant to finality conflict decisions. This should be taken into
|
||||||
|
// account when we improve this aspect.
|
||||||
|
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
|
||||||
|
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, false, err
|
return false, false, err
|
||||||
}
|
}
|
||||||
@ -57,28 +85,33 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *ex
|
|||||||
return true, true, nil
|
return true, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if highestKnownSyncerChainHash == nil {
|
||||||
|
log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
|
||||||
return false, false, nil
|
return false, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return false, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
return false, true, nil
|
return false, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
|
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
|
||||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
|
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||||
@ -87,7 +120,7 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -113,7 +146,10 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
|
|||||||
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash) error {
|
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
|
||||||
|
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
|
||||||
|
highBlockDAAScore uint64) error {
|
||||||
|
|
||||||
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
|
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -130,19 +166,20 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalap
|
|||||||
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
|
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
|
||||||
}
|
}
|
||||||
|
|
||||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash)
|
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
|
||||||
|
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Headers downloaded from peer %s", flow.peer)
|
log.Infof("Headers downloaded from peer %s", flow.peer)
|
||||||
|
|
||||||
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
|
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !highHashInfo.Exists {
|
if !relayBlockInfo.Exists {
|
||||||
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
|
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -205,7 +242,8 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for {
|
i := 0
|
||||||
|
for ; ; i++ {
|
||||||
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
|
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -219,9 +257,19 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
|
||||||
|
// the pruning point outside the loop so we use i+2 instead of i+1.
|
||||||
|
if (i+2)%ibdBatchSize == 0 {
|
||||||
|
log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
|
||||||
|
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
|
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,8 +290,14 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
|
|||||||
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
||||||
|
if err != nil {
|
||||||
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
|
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
||||||
@ -343,6 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
|
|||||||
log.Info("Fetching the pruning point UTXO set")
|
log.Info("Fetching the pruning point UTXO set")
|
||||||
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
@ -2,6 +2,8 @@ package ping
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
@ -61,6 +63,9 @@ func (flow *sendPingsFlow) start() error {
|
|||||||
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if errors.Is(err, router.ErrTimeout) {
|
||||||
|
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
pongMessage := message.(*appmessage.MsgPong)
|
pongMessage := message.(*appmessage.MsgPong)
|
@ -1,14 +1,14 @@
|
|||||||
package v4
|
package v5
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/ping"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/rejects"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
)
|
)
|
||||||
@ -39,13 +39,11 @@ func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStoppin
|
|||||||
outgoingRoute := router.OutgoingRoute()
|
outgoingRoute := router.OutgoingRoute()
|
||||||
|
|
||||||
return []*common.Flow{
|
return []*common.Flow{
|
||||||
// TODO: This code was moved to the upper level to prevent a race condition when connecting to v3 peers. This should be uncommented
|
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
|
||||||
// and removed from the upper level once v3 is obsolete.
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
//m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
|
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
|
||||||
// func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
},
|
||||||
// return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
|
),
|
||||||
// },
|
|
||||||
//),
|
|
||||||
|
|
||||||
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
|
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
@ -80,6 +78,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
|||||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
|
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
|
||||||
appmessage.CmdPruningPointProof,
|
appmessage.CmdPruningPointProof,
|
||||||
appmessage.CmdTrustedData,
|
appmessage.CmdTrustedData,
|
||||||
|
appmessage.CmdIBDChainBlockLocator,
|
||||||
},
|
},
|
||||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
return blockrelay.HandleIBD(m.Context(), incomingRoute,
|
return blockrelay.HandleIBD(m.Context(), incomingRoute,
|
||||||
@ -123,7 +122,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
|||||||
),
|
),
|
||||||
|
|
||||||
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
|
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
|
||||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
|
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||||
},
|
},
|
||||||
@ -136,6 +135,20 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
|||||||
},
|
},
|
||||||
),
|
),
|
||||||
|
|
||||||
|
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
|
||||||
|
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
|
||||||
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
|
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
|
||||||
|
},
|
||||||
|
),
|
||||||
|
|
||||||
|
m.RegisterFlow("HandleRequestAnticone", router,
|
||||||
|
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
|
||||||
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||||
|
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||||
|
},
|
||||||
|
),
|
||||||
|
|
||||||
m.RegisterFlow("HandlePruningPointProofRequests", router,
|
m.RegisterFlow("HandlePruningPointProofRequests", router,
|
||||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
|
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
@ -1,11 +1,11 @@
|
|||||||
package testing
|
package testing
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus"
|
"github.com/kaspanet/kaspad/domain/consensus"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
@ -22,6 +22,7 @@ type TransactionsRelayContext interface {
|
|||||||
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
|
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
|
||||||
OnTransactionAddedToMempool()
|
OnTransactionAddedToMempool()
|
||||||
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
|
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
|
||||||
|
IsNearlySynced() (bool, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type handleRelayedTransactionsFlow struct {
|
type handleRelayedTransactionsFlow struct {
|
||||||
@ -49,6 +50,15 @@ func (flow *handleRelayedTransactionsFlow) start() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isNearlySynced, err := flow.IsNearlySynced()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Transaction relay is disabled if the node is out of sync and thus not mining
|
||||||
|
if !isNearlySynced {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
requestedIDs, err := flow.requestInvTransactions(inv)
|
requestedIDs, err := flow.requestInvTransactions(inv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -92,7 +102,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
|
|||||||
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
|
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
|
||||||
// Ask the transaction memory pool if the transaction is known
|
// Ask the transaction memory pool if the transaction is known
|
||||||
// to it in any form (main pool or orphan).
|
// to it in any form (main pool or orphan).
|
||||||
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
|
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -3,10 +3,10 @@ package transactionrelay_test
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus"
|
"github.com/kaspanet/kaspad/domain/consensus"
|
||||||
@ -47,6 +47,10 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
|
|||||||
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
|
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
|
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
|
||||||
// have the requested transactions in the mempool.
|
// have the requested transactions in the mempool.
|
||||||
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
|
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
|
@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, transactionID := range msgRequestTransactions.IDs {
|
for _, transactionID := range msgRequestTransactions.IDs {
|
||||||
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
|
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
|
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
|
||||||
@ -40,7 +40,6 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
|
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
@ -2,10 +2,10 @@ package transactionrelay_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus"
|
"github.com/kaspanet/kaspad/domain/consensus"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
@ -2,10 +2,11 @@ package protocol
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
@ -90,14 +91,9 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
|
|||||||
return <-errChan
|
return <-errChan
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnVirtualChange sets the onVirtualChangeHandler handler
|
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
|
||||||
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
|
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
|
||||||
m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
|
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
|
||||||
}
|
|
||||||
|
|
||||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
|
||||||
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
|
|
||||||
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
||||||
@ -110,12 +106,6 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
|
|||||||
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
|
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShouldMine returns whether it's ok to use block template from this node
|
|
||||||
// for mining purposes.
|
|
||||||
func (m *Manager) ShouldMine() (bool, error) {
|
|
||||||
return m.context.ShouldMine()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsIBDRunning returns true if IBD is currently marked as running
|
// IsIBDRunning returns true if IBD is currently marked as running
|
||||||
func (m *Manager) IsIBDRunning() bool {
|
func (m *Manager) IsIBDRunning() bool {
|
||||||
return m.context.IsIBDRunning()
|
return m.context.IsIBDRunning()
|
||||||
|
@ -3,9 +3,7 @@ package protocol
|
|||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
|
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
|
||||||
v3 "github.com/kaspanet/kaspad/app/protocol/flows/v3"
|
"github.com/kaspanet/kaspad/app/protocol/flows/v5"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
|
|
||||||
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
@ -25,7 +23,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
|||||||
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
|
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
|
||||||
// They are both initialized here and passed to register flows.
|
// They are both initialized here and passed to register flows.
|
||||||
isStopping := uint32(0)
|
isStopping := uint32(0)
|
||||||
errChan := make(chan error)
|
errChan := make(chan error, 1)
|
||||||
|
|
||||||
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
|
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
|
||||||
|
|
||||||
@ -55,14 +53,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// TODO: This code was moved here to prevent a race condition when connecting to v3 peers. This should be moved to v4.registerAddressFlows
|
|
||||||
// once v3 is obsolete.
|
|
||||||
sendAddressesFlow := m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, &isStopping, errChan,
|
|
||||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
|
||||||
return addressexchange.SendAddresses(m.Context(), incomingRoute, router.OutgoingRoute())
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
|
peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
|
||||||
sendVersionRoute, router.OutgoingRoute())
|
sendVersionRoute, router.OutgoingRoute())
|
||||||
|
|
||||||
@ -86,22 +76,17 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
|||||||
var flows []*common.Flow
|
var flows []*common.Flow
|
||||||
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
|
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
|
||||||
switch peer.ProtocolVersion() {
|
switch peer.ProtocolVersion() {
|
||||||
case 3:
|
case 5:
|
||||||
flows = v3.Register(m, router, errChan, &isStopping)
|
flows = v5.Register(m, router, errChan, &isStopping)
|
||||||
case 4:
|
|
||||||
flows = v4.Register(m, router, errChan, &isStopping)
|
|
||||||
default:
|
default:
|
||||||
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
|
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
|
||||||
}
|
}
|
||||||
flows = append(flows, sendAddressesFlow)
|
|
||||||
|
|
||||||
if peer.ProtocolVersion() > 3 {
|
|
||||||
err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
|
err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
m.handleError(err, netConnection, router.OutgoingRoute())
|
m.handleError(err, netConnection, router.OutgoingRoute())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
removeHandshakeRoutes(router)
|
removeHandshakeRoutes(router)
|
||||||
|
|
||||||
|
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Manager is an RPC manager
|
// Manager is an RPC manager
|
||||||
@ -28,6 +29,7 @@ func NewManager(
|
|||||||
connectionManager *connmanager.ConnectionManager,
|
connectionManager *connmanager.ConnectionManager,
|
||||||
addressManager *addressmanager.AddressManager,
|
addressManager *addressmanager.AddressManager,
|
||||||
utxoIndex *utxoindex.UTXOIndex,
|
utxoIndex *utxoindex.UTXOIndex,
|
||||||
|
consensusEventsChan chan externalapi.ConsensusEvent,
|
||||||
shutDownChan chan<- struct{}) *Manager {
|
shutDownChan chan<- struct{}) *Manager {
|
||||||
|
|
||||||
manager := Manager{
|
manager := Manager{
|
||||||
@ -44,50 +46,90 @@ func NewManager(
|
|||||||
}
|
}
|
||||||
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
|
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
|
||||||
|
|
||||||
|
manager.initConsensusEventsHandler(consensusEventsChan)
|
||||||
|
|
||||||
return &manager
|
return &manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) {
|
||||||
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
|
spawn("consensusEventsHandler", func() {
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
for {
|
||||||
|
consensusEvent, ok := <-consensusEventsChan
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch event := consensusEvent.(type) {
|
||||||
|
case *externalapi.VirtualChangeSet:
|
||||||
|
err := m.notifyVirtualChange(event)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
case *externalapi.BlockAdded:
|
||||||
|
err := m.notifyBlockAddedToDAG(event.Block)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||||
|
func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
|
||||||
|
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
|
||||||
defer onEnd()
|
defer onEnd()
|
||||||
|
|
||||||
err := m.NotifyVirtualChange(virtualChangeSet)
|
// Before converting the block and populating it, we check if any listeners are interested.
|
||||||
if err != nil {
|
// This is done since most nodes do not use this event.
|
||||||
return err
|
if !m.context.NotificationManager.HasBlockAddedListeners() {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
|
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
|
||||||
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
|
err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
|
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
|
||||||
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
|
// notifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||||
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
|
||||||
defer onEnd()
|
defer onEnd()
|
||||||
|
|
||||||
if m.context.Config.UTXOIndex {
|
if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil {
|
||||||
err := m.notifyUTXOsChanged(virtualChangeSet)
|
err := m.notifyUTXOsChanged(virtualChangeSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err := m.notifyVirtualSelectedParentBlueScoreChanged()
|
err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = m.notifyVirtualDaaScoreChanged()
|
err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
|
||||||
|
(len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
|
||||||
|
len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
|
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -96,6 +138,13 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplate notifies the manager that a new
|
||||||
|
// block template is available for miners
|
||||||
|
func (m *Manager) NotifyNewBlockTemplate() error {
|
||||||
|
notification := appmessage.NewNewBlockTemplateNotificationMessage()
|
||||||
|
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
|
||||||
|
}
|
||||||
|
|
||||||
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
||||||
// resets due to pruning point change via IBD.
|
// resets due to pruning point change via IBD.
|
||||||
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
||||||
@ -138,6 +187,7 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
|
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,33 +203,18 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
|
|||||||
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
|
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
|
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error {
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
|
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
|
||||||
defer onEnd()
|
defer onEnd()
|
||||||
|
|
||||||
virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
|
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
|
|
||||||
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
|
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) notifyVirtualDaaScoreChanged() error {
|
func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error {
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
|
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
|
||||||
defer onEnd()
|
defer onEnd()
|
||||||
|
|
||||||
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
|
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
|
||||||
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
|
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
|
||||||
}
|
}
|
||||||
@ -188,10 +223,16 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
|
|||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
|
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
|
||||||
defer onEnd()
|
defer onEnd()
|
||||||
|
|
||||||
|
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
|
||||||
|
|
||||||
|
if hasListeners {
|
||||||
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||||
virtualChangeSet.VirtualSelectedParentChainChanges)
|
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -38,6 +38,7 @@ var handlers = map[appmessage.MessageCommand]handler{
|
|||||||
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
|
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
|
||||||
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
|
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
|
||||||
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
|
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
|
||||||
|
appmessage.CmdGetBalancesByAddressesRequestMessage: rpchandlers.HandleGetBalancesByAddresses,
|
||||||
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
|
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
|
||||||
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
|
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
|
||||||
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
|
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
|
||||||
@ -47,6 +48,9 @@ var handlers = map[appmessage.MessageCommand]handler{
|
|||||||
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
|
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
|
||||||
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
|
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
|
||||||
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
|
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
|
||||||
|
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
|
||||||
|
appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply,
|
||||||
|
appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
||||||
|
@ -3,12 +3,14 @@ package rpccontext
|
|||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
|
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
|
||||||
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
|
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
|
||||||
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||||
selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) (
|
||||||
|
*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
||||||
|
|
||||||
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
|
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
|
||||||
for i, removed := range selectedParentChainChanges.Removed {
|
for i, removed := range selectedParentChainChanges.Removed {
|
||||||
@ -20,5 +22,58 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
|
|||||||
addedChainBlocks[i] = added.String()
|
addedChainBlocks[i] = added.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
|
var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs
|
||||||
|
if includeAcceptedTransactionIDs {
|
||||||
|
var err error
|
||||||
|
acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
|
||||||
|
removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
|
||||||
|
[]*appmessage.AcceptedTransactionIDs, error) {
|
||||||
|
|
||||||
|
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
|
||||||
|
|
||||||
|
const chunk = 1000
|
||||||
|
position := 0
|
||||||
|
|
||||||
|
for position < len(selectedParentChainChanges.Added) {
|
||||||
|
var chainBlocksChunk []*externalapi.DomainHash
|
||||||
|
if position+chunk > len(selectedParentChainChanges.Added) {
|
||||||
|
chainBlocksChunk = selectedParentChainChanges.Added[position:]
|
||||||
|
} else {
|
||||||
|
chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
|
||||||
|
}
|
||||||
|
// We use chunks in order to avoid blocking consensus for too long
|
||||||
|
chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, addedChainBlock := range chainBlocksChunk {
|
||||||
|
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
|
||||||
|
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
|
||||||
|
AcceptingBlockHash: addedChainBlock.String(),
|
||||||
|
AcceptedTransactionIDs: nil,
|
||||||
|
}
|
||||||
|
for _, blockAcceptanceData := range chainBlockAcceptanceData {
|
||||||
|
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
|
||||||
|
if transactionAcceptanceData.IsAccepted {
|
||||||
|
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
|
||||||
|
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
|
||||||
|
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
position += chunk
|
||||||
|
}
|
||||||
|
|
||||||
|
return acceptedTransactionIDs, nil
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
|
|||||||
UTXOIndex: utxoIndex,
|
UTXOIndex: utxoIndex,
|
||||||
ShutDownChan: shutDownChan,
|
ShutDownChan: shutDownChan,
|
||||||
}
|
}
|
||||||
context.NotificationManager = NewNotificationManager()
|
context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams)
|
||||||
|
|
||||||
return context
|
return context
|
||||||
}
|
}
|
||||||
|
@ -3,6 +3,11 @@ package rpccontext
|
|||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/domain/utxoindex"
|
"github.com/kaspanet/kaspad/domain/utxoindex"
|
||||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
@ -13,6 +18,7 @@ import (
|
|||||||
type NotificationManager struct {
|
type NotificationManager struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
listeners map[*routerpkg.Router]*NotificationListener
|
listeners map[*routerpkg.Router]*NotificationListener
|
||||||
|
params *dagconfig.Params
|
||||||
}
|
}
|
||||||
|
|
||||||
// UTXOsChangedNotificationAddress represents a kaspad address.
|
// UTXOsChangedNotificationAddress represents a kaspad address.
|
||||||
@ -24,6 +30,8 @@ type UTXOsChangedNotificationAddress struct {
|
|||||||
|
|
||||||
// NotificationListener represents a registered RPC notification listener
|
// NotificationListener represents a registered RPC notification listener
|
||||||
type NotificationListener struct {
|
type NotificationListener struct {
|
||||||
|
params *dagconfig.Params
|
||||||
|
|
||||||
propagateBlockAddedNotifications bool
|
propagateBlockAddedNotifications bool
|
||||||
propagateVirtualSelectedParentChainChangedNotifications bool
|
propagateVirtualSelectedParentChainChangedNotifications bool
|
||||||
propagateFinalityConflictNotifications bool
|
propagateFinalityConflictNotifications bool
|
||||||
@ -32,13 +40,16 @@ type NotificationListener struct {
|
|||||||
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
||||||
propagateVirtualDaaScoreChangedNotifications bool
|
propagateVirtualDaaScoreChangedNotifications bool
|
||||||
propagatePruningPointUTXOSetOverrideNotifications bool
|
propagatePruningPointUTXOSetOverrideNotifications bool
|
||||||
|
propagateNewBlockTemplateNotifications bool
|
||||||
|
|
||||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||||
|
includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNotificationManager creates a new NotificationManager
|
// NewNotificationManager creates a new NotificationManager
|
||||||
func NewNotificationManager() *NotificationManager {
|
func NewNotificationManager(params *dagconfig.Params) *NotificationManager {
|
||||||
return &NotificationManager{
|
return &NotificationManager{
|
||||||
|
params: params,
|
||||||
listeners: make(map[*routerpkg.Router]*NotificationListener),
|
listeners: make(map[*routerpkg.Router]*NotificationListener),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -48,7 +59,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
|
|||||||
nm.Lock()
|
nm.Lock()
|
||||||
defer nm.Unlock()
|
defer nm.Unlock()
|
||||||
|
|
||||||
listener := newNotificationListener()
|
listener := newNotificationListener(nm.params)
|
||||||
nm.listeners[router] = listener
|
nm.listeners[router] = listener
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -72,6 +83,19 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
|
|||||||
return listener, nil
|
return listener, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
|
||||||
|
func (nm *NotificationManager) HasBlockAddedListeners() bool {
|
||||||
|
nm.RLock()
|
||||||
|
defer nm.RUnlock()
|
||||||
|
|
||||||
|
for _, listener := range nm.listeners {
|
||||||
|
if listener.propagateBlockAddedNotifications {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
|
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
|
||||||
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
|
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
|
||||||
nm.RLock()
|
nm.RLock()
|
||||||
@ -79,10 +103,8 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
|||||||
|
|
||||||
for router, listener := range nm.listeners {
|
for router, listener := range nm.listeners {
|
||||||
if listener.propagateBlockAddedNotifications {
|
if listener.propagateBlockAddedNotifications {
|
||||||
err := router.OutgoingRoute().Enqueue(notification)
|
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
if err != nil {
|
||||||
log.Warnf("Couldn't send notification: %s", err)
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -91,13 +113,27 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
|
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
|
||||||
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
|
||||||
|
notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
||||||
|
|
||||||
nm.RLock()
|
nm.RLock()
|
||||||
defer nm.RUnlock()
|
defer nm.RUnlock()
|
||||||
|
|
||||||
|
notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
|
||||||
|
RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
|
||||||
|
AddedChainBlockHashes: notification.AddedChainBlockHashes,
|
||||||
|
}
|
||||||
|
|
||||||
for router, listener := range nm.listeners {
|
for router, listener := range nm.listeners {
|
||||||
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
||||||
err := router.OutgoingRoute().Enqueue(notification)
|
var err error
|
||||||
|
|
||||||
|
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
|
||||||
|
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||||
|
} else {
|
||||||
|
err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -106,6 +142,31 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notificat
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
|
||||||
|
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
|
||||||
|
// to include AcceptedTransactionIDs.
|
||||||
|
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
|
||||||
|
|
||||||
|
nm.RLock()
|
||||||
|
defer nm.RUnlock()
|
||||||
|
|
||||||
|
hasListeners = false
|
||||||
|
hasListenersThatRequireAcceptedTransactionIDs = false
|
||||||
|
|
||||||
|
for _, listener := range nm.listeners {
|
||||||
|
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
||||||
|
hasListeners = true
|
||||||
|
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
|
||||||
|
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
|
||||||
|
hasListenersThatRequireAcceptedTransactionIDs = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
|
||||||
|
}
|
||||||
|
|
||||||
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
|
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
|
||||||
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
|
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
|
||||||
nm.RLock()
|
nm.RLock()
|
||||||
@ -146,7 +207,10 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
|||||||
for router, listener := range nm.listeners {
|
for router, listener := range nm.listeners {
|
||||||
if listener.propagateUTXOsChangedNotifications {
|
if listener.propagateUTXOsChangedNotifications {
|
||||||
// Filter utxoChanges and create a notification
|
// Filter utxoChanges and create a notification
|
||||||
notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Don't send the notification if it's empty
|
// Don't send the notification if it's empty
|
||||||
if len(notification.Added) == 0 && len(notification.Removed) == 0 {
|
if len(notification.Added) == 0 && len(notification.Removed) == 0 {
|
||||||
@ -154,7 +218,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Enqueue the notification
|
// Enqueue the notification
|
||||||
err := router.OutgoingRoute().Enqueue(notification)
|
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -173,7 +237,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
|
|||||||
|
|
||||||
for router, listener := range nm.listeners {
|
for router, listener := range nm.listeners {
|
||||||
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
|
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
|
||||||
err := router.OutgoingRoute().Enqueue(notification)
|
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -192,6 +256,25 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
|
|||||||
|
|
||||||
for router, listener := range nm.listeners {
|
for router, listener := range nm.listeners {
|
||||||
if listener.propagateVirtualDaaScoreChangedNotifications {
|
if listener.propagateVirtualDaaScoreChangedNotifications {
|
||||||
|
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplate notifies the notification manager that a new
|
||||||
|
// block template is available for miners
|
||||||
|
func (nm *NotificationManager) NotifyNewBlockTemplate(
|
||||||
|
notification *appmessage.NewBlockTemplateNotificationMessage) error {
|
||||||
|
|
||||||
|
nm.RLock()
|
||||||
|
defer nm.RUnlock()
|
||||||
|
|
||||||
|
for router, listener := range nm.listeners {
|
||||||
|
if listener.propagateNewBlockTemplateNotifications {
|
||||||
err := router.OutgoingRoute().Enqueue(notification)
|
err := router.OutgoingRoute().Enqueue(notification)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -218,18 +301,27 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newNotificationListener() *NotificationListener {
|
func newNotificationListener(params *dagconfig.Params) *NotificationListener {
|
||||||
return &NotificationListener{
|
return &NotificationListener{
|
||||||
|
params: params,
|
||||||
|
|
||||||
propagateBlockAddedNotifications: false,
|
propagateBlockAddedNotifications: false,
|
||||||
propagateVirtualSelectedParentChainChangedNotifications: false,
|
propagateVirtualSelectedParentChainChangedNotifications: false,
|
||||||
propagateFinalityConflictNotifications: false,
|
propagateFinalityConflictNotifications: false,
|
||||||
propagateFinalityConflictResolvedNotifications: false,
|
propagateFinalityConflictResolvedNotifications: false,
|
||||||
propagateUTXOsChangedNotifications: false,
|
propagateUTXOsChangedNotifications: false,
|
||||||
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
||||||
|
propagateNewBlockTemplateNotifications: false,
|
||||||
propagatePruningPointUTXOSetOverrideNotifications: false,
|
propagatePruningPointUTXOSetOverrideNotifications: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
|
||||||
|
// includes accepted transaction IDs in it's virtual-selected-parent-chain-changed notifications
|
||||||
|
func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
|
||||||
|
return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
|
||||||
|
}
|
||||||
|
|
||||||
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
|
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
|
||||||
// to the remote listener
|
// to the remote listener
|
||||||
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||||
@ -238,8 +330,9 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
|||||||
|
|
||||||
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
|
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
|
||||||
// to the remote listener
|
// to the remote listener
|
||||||
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
|
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) {
|
||||||
nl.propagateVirtualSelectedParentChainChangedNotifications = true
|
nl.propagateVirtualSelectedParentChainChangedNotifications = true
|
||||||
|
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
|
||||||
}
|
}
|
||||||
|
|
||||||
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
|
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
|
||||||
@ -258,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
|
|||||||
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
|
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
|
||||||
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
|
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
|
||||||
// are ignored.
|
// are ignored.
|
||||||
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||||
|
// Apply a write-lock since the internal listener address map is modified
|
||||||
|
nm.Lock()
|
||||||
|
defer nm.Unlock()
|
||||||
|
|
||||||
if !nl.propagateUTXOsChangedNotifications {
|
if !nl.propagateUTXOsChangedNotifications {
|
||||||
nl.propagateUTXOsChangedNotifications = true
|
nl.propagateUTXOsChangedNotifications = true
|
||||||
nl.propagateUTXOsChangedNotificationAddresses =
|
nl.propagateUTXOsChangedNotificationAddresses =
|
||||||
@ -273,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
|
|||||||
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
|
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
|
||||||
// changed notifications to the remote listener for the given addresses. Addresses for which
|
// changed notifications to the remote listener for the given addresses. Addresses for which
|
||||||
// notifications are not currently sent are ignored.
|
// notifications are not currently sent are ignored.
|
||||||
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||||
|
// Apply a write-lock since the internal listener address map is modified
|
||||||
|
nm.Lock()
|
||||||
|
defer nm.Unlock()
|
||||||
|
|
||||||
if !nl.propagateUTXOsChangedNotifications {
|
if !nl.propagateUTXOsChangedNotifications {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -284,7 +385,7 @@ func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(address
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||||
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
|
utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) {
|
||||||
|
|
||||||
// As an optimization, we iterate over the smaller set (O(n)) among the two below
|
// As an optimization, we iterate over the smaller set (O(n)) among the two below
|
||||||
// and check existence over the larger set (O(1))
|
// and check existence over the larger set (O(1))
|
||||||
@ -299,27 +400,64 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
|||||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
|
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
|
||||||
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
||||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else if addressesSize > 0 {
|
||||||
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
||||||
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
||||||
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
||||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
||||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||||
}
|
}
|
||||||
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
|
||||||
|
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return notification
|
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
|
||||||
|
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||||
|
}
|
||||||
|
for scriptPublicKeyString, removedPAirs := range utxoChanges.Removed {
|
||||||
|
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPAirs)
|
||||||
|
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return notification, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
|
||||||
|
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
|
||||||
|
|
||||||
|
// ignore error because it is often returned when the script is of unknown type
|
||||||
|
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var addressString string
|
||||||
|
if scriptType == txscript.NonStandardTy {
|
||||||
|
addressString = ""
|
||||||
|
} else {
|
||||||
|
addressString = address.String()
|
||||||
|
}
|
||||||
|
return addressString, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
|
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
|
||||||
@ -334,6 +472,12 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
|
|||||||
nl.propagateVirtualDaaScoreChangedNotifications = true
|
nl.propagateVirtualDaaScoreChangedNotifications = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PropagateNewBlockTemplateNotifications instructs the listener to send
|
||||||
|
// new block template notifications to the remote listener
|
||||||
|
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
|
||||||
|
nl.propagateNewBlockTemplateNotifications = true
|
||||||
|
}
|
||||||
|
|
||||||
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
||||||
// to the remote listener.
|
// to the remote listener.
|
||||||
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
||||||
|
@ -32,22 +32,6 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
|
|||||||
return utxosByAddressesEntries
|
return utxosByAddressesEntries
|
||||||
}
|
}
|
||||||
|
|
||||||
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
|
|
||||||
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
|
|
||||||
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
|
|
||||||
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
|
|
||||||
for outpoint := range outpoints {
|
|
||||||
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
|
|
||||||
Address: address,
|
|
||||||
Outpoint: &appmessage.RPCOutpoint{
|
|
||||||
TransactionID: outpoint.TransactionID.String(),
|
|
||||||
Index: outpoint.Index,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return utxosByAddressesEntries
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
|
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
|
||||||
// to UTXOsChangedNotificationAddresses
|
// to UTXOsChangedNotificationAddresses
|
||||||
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||||
@ -63,7 +47,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||||
}
|
}
|
||||||
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
|
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
|
||||||
addresses[i] = &UTXOsChangedNotificationAddress{
|
addresses[i] = &UTXOsChangedNotificationAddress{
|
||||||
Address: addressString,
|
Address: addressString,
|
||||||
ScriptPublicKeyString: scriptPublicKeyString,
|
ScriptPublicKeyString: scriptPublicKeyString,
|
||||||
|
@ -56,7 +56,12 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
|||||||
"invalid block")
|
"invalid block")
|
||||||
}
|
}
|
||||||
|
|
||||||
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
|
_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -67,14 +72,13 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
|||||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||||
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
||||||
BlueScore: blockInfo.BlueScore,
|
BlueScore: blockInfo.BlueScore,
|
||||||
|
MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
|
||||||
|
MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds),
|
||||||
|
IsChainBlock: isChainBlock,
|
||||||
}
|
}
|
||||||
// selectedParentHash will be nil in the genesis block
|
// selectedParentHash will be nil in the genesis block
|
||||||
if selectedParentHash != nil {
|
if blockInfo.SelectedParent != nil {
|
||||||
block.VerboseData.SelectedParentHash = selectedParentHash.String()
|
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
|
||||||
}
|
|
||||||
|
|
||||||
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the block if we didn't receive it previously
|
// Get the block if we didn't receive it previously
|
||||||
@ -85,6 +89,10 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(domainBlock.Transactions) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
transactionIDs := make([]string, len(domainBlock.Transactions))
|
transactionIDs := make([]string, len(domainBlock.Transactions))
|
||||||
for i, transaction := range domainBlock.Transactions {
|
for i, transaction := range domainBlock.Transactions {
|
||||||
transactionIDs[i] = consensushashing.TransactionID(transaction).String()
|
transactionIDs[i] = consensushashing.TransactionID(transaction).String()
|
||||||
@ -114,6 +122,7 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
|
|||||||
}
|
}
|
||||||
|
|
||||||
ctx.Domain.Consensus().PopulateMass(domainTransaction)
|
ctx.Domain.Consensus().PopulateMass(domainTransaction)
|
||||||
|
|
||||||
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
|
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
|
||||||
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
|
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
|
||||||
Hash: consensushashing.TransactionHash(domainTransaction).String(),
|
Hash: consensushashing.TransactionHash(domainTransaction).String(),
|
||||||
|
@ -9,6 +9,14 @@ import (
|
|||||||
|
|
||||||
// HandleAddPeer handles the respectively named RPC command
|
// HandleAddPeer handles the respectively named RPC command
|
||||||
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||||
|
if context.Config.SafeRPC {
|
||||||
|
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
|
||||||
|
response := appmessage.NewAddPeerResponseMessage()
|
||||||
|
response.Error =
|
||||||
|
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
|
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
|
||||||
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
|
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -9,6 +9,14 @@ import (
|
|||||||
|
|
||||||
// HandleBan handles the respectively named RPC command
|
// HandleBan handles the respectively named RPC command
|
||||||
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||||
|
if context.Config.SafeRPC {
|
||||||
|
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
|
||||||
|
response := appmessage.NewBanResponseMessage()
|
||||||
|
response.Error =
|
||||||
|
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
banRequest := request.(*appmessage.BanRequestMessage)
|
banRequest := request.(*appmessage.BanRequestMessage)
|
||||||
ip := net.ParseIP(banRequest.IP)
|
ip := net.ParseIP(banRequest.IP)
|
||||||
if ip == nil {
|
if ip == nil {
|
||||||
|
@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if context.Config.SafeRPC {
|
||||||
|
const windowSizeLimit = 10000
|
||||||
|
if windowSize > windowSizeLimit {
|
||||||
|
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||||
|
response.Error =
|
||||||
|
appmessage.RPCErrorf(
|
||||||
|
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
|
||||||
|
windowSize, windowSizeLimit)
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
|
||||||
|
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||||
|
response.Error =
|
||||||
|
appmessage.RPCErrorf(
|
||||||
|
"Requested window size %d is larger than pruning point depth %d",
|
||||||
|
windowSize, context.Config.ActiveNetParams.PruningDepth())
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
|
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||||
|
@ -6,6 +6,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/kaspanet/kaspad/util"
|
"github.com/kaspanet/kaspad/util"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandleGetBalanceByAddress handles the respectively named RPC command
|
// HandleGetBalanceByAddress handles the respectively named RPC command
|
||||||
@ -18,30 +19,39 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
|
|||||||
|
|
||||||
getBalanceByAddressRequest := request.(*appmessage.GetBalanceByAddressRequestMessage)
|
getBalanceByAddressRequest := request.(*appmessage.GetBalanceByAddressRequestMessage)
|
||||||
|
|
||||||
var balance uint64 = 0
|
balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
|
||||||
addressString := getBalanceByAddressRequest.Address
|
|
||||||
|
|
||||||
address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
|
|
||||||
if err != nil {
|
|
||||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
|
||||||
errorMessage.Error = appmessage.RPCErrorf("Could decode address '%s': %s", addressString, err)
|
|
||||||
return errorMessage, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
|
||||||
if err != nil {
|
|
||||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
|
||||||
errorMessage.Error = appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
|
||||||
return errorMessage, nil
|
|
||||||
}
|
|
||||||
utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
rpcError := &appmessage.RPCError{}
|
||||||
|
if !errors.As(err, &rpcError) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs {
|
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||||
balance += utxoOutpointEntryPair.Amount()
|
errorMessage.Error = rpcError
|
||||||
|
return errorMessage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
response := appmessage.NewGetBalanceByAddressResponse(balance)
|
response := appmessage.NewGetBalanceByAddressResponse(balance)
|
||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getBalanceByAddress(context *rpccontext.Context, addressString string) (uint64, error) {
|
||||||
|
address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
|
||||||
|
if err != nil {
|
||||||
|
return 0, appmessage.RPCErrorf("Couldn't decode address '%s': %s", addressString, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||||
|
if err != nil {
|
||||||
|
return 0, appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||||
|
}
|
||||||
|
utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
balance := uint64(0)
|
||||||
|
for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs {
|
||||||
|
balance += utxoOutpointEntryPair.Amount()
|
||||||
|
}
|
||||||
|
return balance, nil
|
||||||
|
}
|
||||||
|
41
app/rpc/rpchandlers/get_balances_by_addresses.go
Normal file
41
app/rpc/rpchandlers/get_balances_by_addresses.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package rpchandlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HandleGetBalancesByAddresses handles the respectively named RPC command
|
||||||
|
func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||||
|
if !context.Config.UTXOIndex {
|
||||||
|
errorMessage := &appmessage.GetBalancesByAddressesResponseMessage{}
|
||||||
|
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
|
||||||
|
return errorMessage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
getBalancesByAddressesRequest := request.(*appmessage.GetBalancesByAddressesRequestMessage)
|
||||||
|
|
||||||
|
allEntries := make([]*appmessage.BalancesByAddressesEntry, len(getBalancesByAddressesRequest.Addresses))
|
||||||
|
for i, address := range getBalancesByAddressesRequest.Addresses {
|
||||||
|
balance, err := getBalanceByAddress(context, address)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rpcError := &appmessage.RPCError{}
|
||||||
|
if !errors.As(err, &rpcError) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||||
|
errorMessage.Error = rpcError
|
||||||
|
return errorMessage, nil
|
||||||
|
}
|
||||||
|
allEntries[i] = &appmessage.BalancesByAddressesEntry{
|
||||||
|
Address: address,
|
||||||
|
Balance: balance,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
response := appmessage.NewGetBalancesByAddressesResponse(allEntries)
|
||||||
|
return response, nil
|
||||||
|
}
|
@ -4,9 +4,11 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/kaspanet/kaspad/util"
|
"github.com/kaspanet/kaspad/util"
|
||||||
|
"github.com/kaspanet/kaspad/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandleGetBlockTemplate handles the respectively named RPC command
|
// HandleGetBlockTemplate handles the respectively named RPC command
|
||||||
@ -15,7 +17,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
|||||||
|
|
||||||
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
|
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errorMessage := &appmessage.GetBlockResponseMessage{}
|
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||||
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
|
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
|
||||||
return errorMessage, nil
|
return errorMessage, nil
|
||||||
}
|
}
|
||||||
@ -25,18 +27,20 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}
|
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
|
||||||
|
|
||||||
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
|
templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
|
||||||
|
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||||
|
errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
|
||||||
|
return errorMessage, nil
|
||||||
|
}
|
||||||
|
|
||||||
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
|
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
|
||||||
|
|
||||||
isSynced, err := context.ProtocolManager.ShouldMine()
|
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
|
|
||||||
}
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user