mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-09-14 21:40:11 +00:00
Compare commits
339 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
4bb5bf25d3 | ||
![]() |
25c2dd8670 | ||
![]() |
c93100ccd0 | ||
![]() |
03cc7dfc19 | ||
![]() |
ed745a9acb | ||
![]() |
c23c1d141c | ||
![]() |
352d261fd6 | ||
![]() |
43b9523919 | ||
![]() |
6085d1fc84 | ||
![]() |
1e9ddc42d0 | ||
![]() |
48a142e12f | ||
![]() |
86b89065cf | ||
![]() |
f41dc7fa0b | ||
![]() |
6b38bf7069 | ||
![]() |
d2453f8e7b | ||
![]() |
629faa8436 | ||
![]() |
91e6c6b74b | ||
![]() |
0819244ba1 | ||
![]() |
a0149cd8d0 | ||
![]() |
5a3b8a0066 | ||
![]() |
8e71f79f98 | ||
![]() |
346341a709 | ||
![]() |
8c881aea39 | ||
![]() |
40ec440dcf | ||
![]() |
88bdcb43bc | ||
![]() |
9d1e44673f | ||
![]() |
387fade044 | ||
![]() |
c417c8b525 | ||
![]() |
bd1420220a | ||
![]() |
5640ec4020 | ||
![]() |
1c0887ca60 | ||
![]() |
7be3f41aa7 | ||
![]() |
26c4c73624 | ||
![]() |
880d917e58 | ||
![]() |
3c53c6d8cd | ||
![]() |
3c4b973090 | ||
![]() |
8aee8f81c5 | ||
![]() |
ec3441e63f | ||
![]() |
e3ba1ca07e | ||
![]() |
27fdbd9c88 | ||
![]() |
377d9aaaeb | ||
![]() |
beee947dda | ||
![]() |
d4a27bf1c1 | ||
![]() |
eec6eb9669 | ||
![]() |
d5c10832c2 | ||
![]() |
9fbfba17b6 | ||
![]() |
09d698dd0e | ||
![]() |
ec51c6926a | ||
![]() |
7d44275eb1 | ||
![]() |
a3387a56b3 | ||
![]() |
c2ae03fc89 | ||
![]() |
6c774c966b | ||
![]() |
2d54c9693b | ||
![]() |
d8350d62b0 | ||
![]() |
26c7db251f | ||
![]() |
4d435f2b3a | ||
![]() |
067688f549 | ||
![]() |
3a3fa0d3f0 | ||
![]() |
cf4073b773 | ||
![]() |
6a5e7c9e3f | ||
![]() |
7e9b5b9010 | ||
![]() |
953838e0d8 | ||
![]() |
a1dcb34c29 | ||
![]() |
23764e1b0b | ||
![]() |
0838cc8e32 | ||
![]() |
9f51330f38 | ||
![]() |
f6d46fd23f | ||
![]() |
2a7e03e232 | ||
![]() |
3286a7d010 | ||
![]() |
aabbc741d7 | ||
![]() |
20b7ab89f9 | ||
![]() |
10f1e7e3f4 | ||
![]() |
d941c73701 | ||
![]() |
3f80638c86 | ||
![]() |
266ec6c270 | ||
![]() |
9ee409afaa | ||
![]() |
715cb3b1ac | ||
![]() |
eb693c4a86 | ||
![]() |
7a61c637b0 | ||
![]() |
c7bd84ef9d | ||
![]() |
b26b9f6c4b | ||
![]() |
1c9bb54cc2 | ||
![]() |
b9093d59eb | ||
![]() |
18d000f625 | ||
![]() |
c5aade7e7f | ||
![]() |
d4b741fd7c | ||
![]() |
74a4f927e9 | ||
![]() |
847aafc91f | ||
![]() |
c87e541570 | ||
![]() |
2ea1c4f922 | ||
![]() |
5e9c28b77b | ||
![]() |
d957a6d93a | ||
![]() |
b2648aa5bd | ||
![]() |
3908f274ae | ||
![]() |
fa7ea121ff | ||
![]() |
24848da895 | ||
![]() |
b200b77541 | ||
![]() |
d50ad0667c | ||
![]() |
5cea285960 | ||
![]() |
7eb5085f6b | ||
![]() |
491e3569d2 | ||
![]() |
440aea19b0 | ||
![]() |
968d47c3e6 | ||
![]() |
052193865e | ||
![]() |
85febcb551 | ||
![]() |
a4d9fa10bf | ||
![]() |
cd5fd86ad3 | ||
![]() |
b84d6fed2c | ||
![]() |
24c94b38be | ||
![]() |
4dd7113dc5 | ||
![]() |
48c7fa0104 | ||
![]() |
4d0cf2169a | ||
![]() |
5f7cc079e9 | ||
![]() |
016ddfdfce | ||
![]() |
5d24e2afbc | ||
![]() |
8735da045f | ||
![]() |
c839337425 | ||
![]() |
7390651072 | ||
![]() |
52fbeedf20 | ||
![]() |
1660cf0cf1 | ||
![]() |
2b5202be7a | ||
![]() |
9ffbb15160 | ||
![]() |
540b0d3a22 | ||
![]() |
8d5faee53a | ||
![]() |
6e2fd0633b | ||
![]() |
beb038c815 | ||
![]() |
35a959b56f | ||
![]() |
57c6118be8 | ||
![]() |
723aebbec9 | ||
![]() |
2b395e34b1 | ||
![]() |
ada559f007 | ||
![]() |
357e8ce73c | ||
![]() |
6725902663 | ||
![]() |
99bb21c512 | ||
![]() |
a4669f3fb5 | ||
![]() |
e8f40bdff9 | ||
![]() |
68a407ea37 | ||
![]() |
80879cabe1 | ||
![]() |
71afc62298 | ||
![]() |
ca5c8549b9 | ||
![]() |
ab73def07a | ||
![]() |
3f840233d8 | ||
![]() |
90d9edb8e5 | ||
![]() |
b9b360bce4 | ||
![]() |
27654961f9 | ||
![]() |
d45af760d8 | ||
![]() |
95fa045297 | ||
![]() |
cb65dae63d | ||
![]() |
21b82d7efc | ||
![]() |
63c6d7443b | ||
![]() |
753f4a2ec1 | ||
![]() |
ed667f7e54 | ||
![]() |
c4a034eb43 | ||
![]() |
2eca0f0b5f | ||
![]() |
58d627e05a | ||
![]() |
639183ba0e | ||
![]() |
9fa08442cf | ||
![]() |
0dd50394ec | ||
![]() |
ac8d4e1341 | ||
![]() |
2488fbde78 | ||
![]() |
2ab8065142 | ||
![]() |
25410b86ae | ||
![]() |
4e44dd8510 | ||
![]() |
1e56a22b32 | ||
![]() |
7a95f0c7a4 | ||
![]() |
c81506220b | ||
![]() |
e5598c15a7 | ||
![]() |
433af5e0fe | ||
![]() |
b7be807167 | ||
![]() |
e687ceeae7 | ||
![]() |
04e35321aa | ||
![]() |
061e65be93 | ||
![]() |
190e725dd0 | ||
![]() |
6449b03034 | ||
![]() |
9f02a24e8b | ||
![]() |
9b23bbcdb5 | ||
![]() |
b30f7309a2 | ||
![]() |
1c18a49992 | ||
![]() |
28d0f1ea2e | ||
![]() |
3f7e482291 | ||
![]() |
ce4f5fcc33 | ||
![]() |
be3a6604d7 | ||
![]() |
f452531df0 | ||
![]() |
13a09da848 | ||
![]() |
f58aeb4f9f | ||
![]() |
82f0a4d74f | ||
![]() |
69d90fe827 | ||
![]() |
c85b5d70fd | ||
![]() |
1cd712a63e | ||
![]() |
27ba9d0374 | ||
![]() |
b1229f7908 | ||
![]() |
4a560f25a6 | ||
![]() |
dab1a881fe | ||
![]() |
598392d0cf | ||
![]() |
6d27637055 | ||
![]() |
4855d845b3 | ||
![]() |
b1b179c105 | ||
![]() |
dadacdc0f4 | ||
![]() |
d2379608ad | ||
![]() |
14b2bcbd81 | ||
![]() |
71b284f4d5 | ||
![]() |
0e1d247915 | ||
![]() |
504ec36612 | ||
![]() |
c80b113319 | ||
![]() |
0bdd19136f | ||
![]() |
7c1cddff11 | ||
![]() |
064b0454e8 | ||
![]() |
8282fb486e | ||
![]() |
428449bb7d | ||
![]() |
f54659ead0 | ||
![]() |
99f82eb80f | ||
![]() |
aa43c14fc5 | ||
![]() |
129e9119d2 | ||
![]() |
cae7faced2 | ||
![]() |
f036b18f9e | ||
![]() |
1d740e1eab | ||
![]() |
011871cda2 | ||
![]() |
7b4f761fb9 | ||
![]() |
5806fef35f | ||
![]() |
227ef392ba | ||
![]() |
f3d76d6565 | ||
![]() |
df573bba63 | ||
![]() |
2a97b7c9bb | ||
![]() |
70900c571b | ||
![]() |
7292438e4a | ||
![]() |
dced1a9376 | ||
![]() |
32e8e539ac | ||
![]() |
11103a36d3 | ||
![]() |
606b781ca0 | ||
![]() |
dbf18d8052 | ||
![]() |
2a1b38ce7a | ||
![]() |
29c410d123 | ||
![]() |
6e6fabf956 | ||
![]() |
b04292c97a | ||
![]() |
765dd170e4 | ||
![]() |
8e362845b3 | ||
![]() |
5c1ba9170e | ||
![]() |
9d8c555bdf | ||
![]() |
a2f574eab8 | ||
![]() |
7bed86dc1b | ||
![]() |
9b81f5145e | ||
![]() |
cd8341ef57 | ||
![]() |
ad8bdbed21 | ||
![]() |
7cdceb6df0 | ||
![]() |
cc5248106e | ||
![]() |
e3463b7268 | ||
![]() |
a2173ef80a | ||
![]() |
aeb4500b61 | ||
![]() |
0a1daae319 | ||
![]() |
131cd3357e | ||
![]() |
ff72568d6b | ||
![]() |
2dddb650b9 | ||
![]() |
99aaacd649 | ||
![]() |
77a344cc29 | ||
![]() |
3dbc42b4f7 | ||
![]() |
1b9be28613 | ||
![]() |
5dbb1da84b | ||
![]() |
afaac28da1 | ||
![]() |
0053ee788d | ||
![]() |
af7e7de247 | ||
![]() |
02a08902a7 | ||
![]() |
d9bc94a2a8 | ||
![]() |
837dac68b5 | ||
![]() |
ba5880fab1 | ||
![]() |
7b5720a155 | ||
![]() |
65b5a080e4 | ||
![]() |
ce17348175 | ||
![]() |
d922ee1be2 | ||
![]() |
4132891ac9 | ||
![]() |
2094f4facf | ||
![]() |
2de68f43f0 | ||
![]() |
d748089a14 | ||
![]() |
7d1071a9b1 | ||
![]() |
f26a7fdedf | ||
![]() |
d207888b67 | ||
![]() |
38e2ee1b43 | ||
![]() |
aba44e7bfb | ||
![]() |
c731d74bc0 | ||
![]() |
60e7a8ebed | ||
![]() |
369a3bac09 | ||
![]() |
8022e4cbea | ||
![]() |
28ac77b202 | ||
![]() |
28af7eb596 | ||
![]() |
a4d241c30a | ||
![]() |
487fab0e2b | ||
![]() |
2f272cd517 | ||
![]() |
e3a6d9e49a | ||
![]() |
069ee26e84 | ||
![]() |
61aa15fd61 | ||
![]() |
f7cce5cb39 | ||
![]() |
2f7a1395e7 | ||
![]() |
8b1ac86532 | ||
![]() |
ab721f3ad6 | ||
![]() |
798c5fab7d | ||
![]() |
c13a4d90ed | ||
![]() |
4ba8b14675 | ||
![]() |
319cbce768 | ||
![]() |
bdd42903b4 | ||
![]() |
9bedf84740 | ||
![]() |
f317f51cdd | ||
![]() |
4207c82f5a | ||
![]() |
70399dae2a | ||
![]() |
2ae1b7853f | ||
![]() |
d53d040bee | ||
![]() |
79c74c482b | ||
![]() |
3b0394eefe | ||
![]() |
43e6467ff1 | ||
![]() |
363494ef7a | ||
![]() |
d1df97c4c5 | ||
![]() |
4f52a6de51 | ||
![]() |
4f4a8934e7 | ||
![]() |
16ba2bd312 | ||
![]() |
6613faee2d | ||
![]() |
edc459ae1b | ||
![]() |
d7f2cf81c0 | ||
![]() |
4658f9d05c | ||
![]() |
010df3b0d3 | ||
![]() |
346598e67f | ||
![]() |
268906a7ce | ||
![]() |
befc60b185 | ||
![]() |
dd3e04e671 | ||
![]() |
9c743db4d6 | ||
![]() |
eb3dba5c88 | ||
![]() |
e46e2580b1 | ||
![]() |
414f58fb90 | ||
![]() |
9df80957b1 | ||
![]() |
268c9fa83c | ||
![]() |
2e3592e351 | ||
![]() |
19718ac102 | ||
![]() |
28a8e96e65 | ||
![]() |
4df283934a | ||
![]() |
ab89efe3dc | ||
![]() |
fa16c30cf3 | ||
![]() |
c28366eb50 | ||
![]() |
dc0bf56bf3 | ||
![]() |
91de1807ad | ||
![]() |
830684167c | ||
![]() |
1f56a68a28 | ||
![]() |
13a6b4cc51 |
4
.github/workflows/SetPageFileSize.ps1
vendored
4
.github/workflows/SetPageFileSize.ps1
vendored
@ -11,8 +11,8 @@
|
|||||||
#>
|
#>
|
||||||
|
|
||||||
param(
|
param(
|
||||||
[System.UInt64] $MinimumSize = 8gb ,
|
[System.UInt64] $MinimumSize = 16gb ,
|
||||||
[System.UInt64] $MaximumSize = 8gb ,
|
[System.UInt64] $MaximumSize = 16gb ,
|
||||||
[System.String] $DiskRoot = "D:"
|
[System.String] $DiskRoot = "D:"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
name: Build and Upload assets
|
name: Build and upload assets
|
||||||
on:
|
on:
|
||||||
release:
|
release:
|
||||||
types: [published]
|
types: [published]
|
||||||
@ -9,35 +9,28 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest, windows-latest, macos-latest ]
|
os: [ubuntu-latest, windows-latest, macos-latest]
|
||||||
name: Building For ${{ matrix.os }}
|
name: Building, ${{ matrix.os }}
|
||||||
steps:
|
steps:
|
||||||
- name: Fix windows CRLF
|
- name: Fix CRLF on Windows
|
||||||
|
if: runner.os == 'Windows'
|
||||||
run: git config --global core.autocrlf false
|
run: git config --global core.autocrlf false
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# We need to increase the page size because the tests run out of memory on github CI windows.
|
- name: Setup Go
|
||||||
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
|
uses: actions/setup-go@v5
|
||||||
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
|
|
||||||
- name: Increase page size on windows
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
shell: powershell
|
|
||||||
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
|
|
||||||
|
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.21
|
||||||
|
|
||||||
- name: Build on linux
|
- name: Build on Linux
|
||||||
if: runner.os == 'Linux'
|
if: runner.os == 'Linux'
|
||||||
# `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
|
# `-extldflags=-static` - means static link everything,
|
||||||
|
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
|
||||||
# `-s -w` strips the binary to produce smaller size binaries
|
# `-s -w` strips the binary to produce smaller size binaries
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
|
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||||
zip -r "${archive}" ./bin/*
|
zip -r "${archive}" ./bin/*
|
||||||
@ -48,7 +41,7 @@ jobs:
|
|||||||
if: runner.os == 'Windows'
|
if: runner.os == 'Windows'
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w" -o bin/ ./...
|
go build -v -ldflags="-s -w" -o bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||||
powershell "Compress-Archive bin/* \"${archive}\""
|
powershell "Compress-Archive bin/* \"${archive}\""
|
||||||
@ -58,15 +51,14 @@ jobs:
|
|||||||
- name: Build on MacOS
|
- name: Build on MacOS
|
||||||
if: runner.os == 'macOS'
|
if: runner.os == 'macOS'
|
||||||
run: |
|
run: |
|
||||||
go build -v -ldflags="-s -w" -o ./bin/ ./...
|
go build -v -ldflags="-s -w" -o ./bin/ ./cmd/...
|
||||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||||
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||||
zip -r "${archive}" ./bin/*
|
zip -r "${archive}" ./bin/*
|
||||||
echo "archive=${archive}" >> $GITHUB_ENV
|
echo "archive=${archive}" >> $GITHUB_ENV
|
||||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Upload release asset
|
||||||
- name: Upload Release Asset
|
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
73
.github/workflows/go.yml
vendored
73
.github/workflows/go.yml
vendored
@ -1,73 +0,0 @@
|
|||||||
name: Go
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
pull_request:
|
|
||||||
# edtited - "title, body, or the base branch of the PR is modified"
|
|
||||||
# synchronize - "commit(s) pushed to the pull request"
|
|
||||||
types: [opened, synchronize, edited, reopened]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
os: [ ubuntu-16.04, macos-10.15, windows-2019 ]
|
|
||||||
name: Testing on on ${{ matrix.os }}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Fix windows CRLF
|
|
||||||
run: git config --global core.autocrlf false
|
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
# We need to increase the page size because the tests run out of memory on github CI windows.
|
|
||||||
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
|
|
||||||
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
|
|
||||||
- name: Increase page size on windows
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
shell: powershell
|
|
||||||
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
|
|
||||||
|
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.16
|
|
||||||
|
|
||||||
|
|
||||||
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
|
||||||
- name: Go Cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
|
|
||||||
- name: Test
|
|
||||||
shell: bash
|
|
||||||
run: ./build_and_test.sh -v
|
|
||||||
|
|
||||||
coverage:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
name: Produce code coverage
|
|
||||||
steps:
|
|
||||||
- name: Check out code into the Go module directory
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.16
|
|
||||||
|
|
||||||
- name: Delete the stability tests from coverage
|
|
||||||
run: rm -r stability-tests
|
|
||||||
|
|
||||||
- name: Create coverage file
|
|
||||||
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
|
|
||||||
|
|
||||||
- name: Upload coverage file
|
|
||||||
run: bash <(curl -s https://codecov.io/bash)
|
|
@ -1,4 +1,4 @@
|
|||||||
name: Go-Race
|
name: Race
|
||||||
|
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
@ -7,22 +7,22 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
race_test:
|
race_test:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
branch: [ master, latest ]
|
branch: [master, latest]
|
||||||
name: Race detection on ${{ matrix.branch }}
|
name: Race detection on ${{ matrix.branch }}
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Set up Go 1.x
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1.15
|
go-version: 1.23
|
||||||
|
|
||||||
- name: Set scheduled branch name
|
- name: Set scheduled branch name
|
||||||
shell: bash
|
shell: bash
|
||||||
@ -46,4 +46,4 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
git checkout "${{ env.run_on }}"
|
git checkout "${{ env.run_on }}"
|
||||||
git status
|
git status
|
||||||
go test -race ./...
|
go test -timeout 20m -race ./...
|
92
.github/workflows/tests.yaml
vendored
Normal file
92
.github/workflows/tests.yaml
vendored
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
name: Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
# edtited - because base branch can be modified
|
||||||
|
# synchronize - update commits on PR
|
||||||
|
types: [opened, synchronize, edited]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os: [ubuntu-latest, macos-latest]
|
||||||
|
name: Tests, ${{ matrix.os }}
|
||||||
|
steps:
|
||||||
|
- name: Fix CRLF on Windows
|
||||||
|
if: runner.os == 'Windows'
|
||||||
|
run: git config --global core.autocrlf false
|
||||||
|
|
||||||
|
- name: Check out code into the Go module directory
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
# Increase the pagefile size on Windows to aviod running out of memory
|
||||||
|
- name: Increase pagefile size on Windows
|
||||||
|
if: runner.os == 'Windows'
|
||||||
|
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: 1.23
|
||||||
|
|
||||||
|
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
||||||
|
- name: Go Cache
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: ~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
shell: bash
|
||||||
|
run: ./build_and_test.sh -v
|
||||||
|
|
||||||
|
stability-test-fast:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Fast stability tests, ${{ github.head_ref }}
|
||||||
|
steps:
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: 1.23
|
||||||
|
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Install kaspad
|
||||||
|
run: go install ./...
|
||||||
|
|
||||||
|
- name: Install golint
|
||||||
|
run: go get -u golang.org/x/lint/golint
|
||||||
|
|
||||||
|
- name: Run fast stability tests
|
||||||
|
working-directory: stability-tests
|
||||||
|
run: ./install_and_test.sh
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Produce code coverage
|
||||||
|
steps:
|
||||||
|
- name: Check out code into the Go module directory
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: 1.23
|
||||||
|
|
||||||
|
- name: Delete the stability tests from coverage
|
||||||
|
run: rm -r stability-tests
|
||||||
|
|
||||||
|
- name: Create coverage file
|
||||||
|
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
|
||||||
|
|
||||||
|
- name: Upload coverage file
|
||||||
|
run: bash <(curl -s https://codecov.io/bash)
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -53,6 +53,7 @@ _testmain.go
|
|||||||
debug
|
debug
|
||||||
debug.test
|
debug.test
|
||||||
__debug_bin
|
__debug_bin
|
||||||
|
*__debug_*
|
||||||
|
|
||||||
# CI
|
# CI
|
||||||
version.txt
|
version.txt
|
||||||
|
43
CODE_OF_CONDUCT.md
Normal file
43
CODE_OF_CONDUCT.md
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainers on this [Google form][gform]. The project maintainers will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[gform]: https://forms.gle/dnKXMJL7VxdUjt3x5
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
@ -12,8 +12,7 @@ If you want to make a big change it's better to discuss it first by opening an i
|
|||||||
|
|
||||||
## Pull Request process
|
## Pull Request process
|
||||||
|
|
||||||
Any pull request should be opened against the development branch of the target version. The development branch format is
|
Any pull request should be opened against the development branch `dev`.
|
||||||
as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
|
|
||||||
|
|
||||||
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
|
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
|
||||||
submitting your PR.
|
submitting your PR.
|
21
README.md
21
README.md
@ -1,16 +1,15 @@
|
|||||||
|
# DEPRECATED
|
||||||
|
|
||||||
Kaspad
|
The full node reference implementation was [rewritten in Rust](https://github.com/kaspanet/rusty-kaspa), as a result, the Go implementation is now deprecated.
|
||||||
====
|
|
||||||
Warning: This is pre-alpha software. There's no guarantee anything works.
|
PLEASE NOTE: Any pull requests or issues that will be opened in this repository will be closed without treatment, except for issues or pull requests related to the kaspawallet, which remains maintained. In any other case, please use the [Rust implementation](https://github.com/kaspanet/rusty-kaspa) instead.
|
||||||
====
|
|
||||||
|
# Kaspad
|
||||||
|
|
||||||
[](https://choosealicense.com/licenses/isc/)
|
[](https://choosealicense.com/licenses/isc/)
|
||||||
[](http://godoc.org/github.com/kaspanet/kaspad)
|
[](http://godoc.org/github.com/kaspanet/kaspad)
|
||||||
|
|
||||||
Kaspad is the reference full node Kaspa implementation written in Go (golang).
|
Kaspad was the reference full node Kaspa implementation written in Go (golang).
|
||||||
|
|
||||||
This project is currently under active development and is in a pre-Alpha state.
|
|
||||||
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
|
|
||||||
|
|
||||||
## What is kaspa
|
## What is kaspa
|
||||||
|
|
||||||
@ -18,7 +17,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
Go 1.16 or later.
|
Go 1.23 or later.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
@ -45,7 +44,6 @@ $ go install . ./cmd/...
|
|||||||
not already add the bin directory to your system path during Go installation,
|
not already add the bin directory to your system path during Go installation,
|
||||||
you are encouraged to do so now.
|
you are encouraged to do so now.
|
||||||
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
Kaspad has several configuration options available to tweak how it runs, but all
|
Kaspad has several configuration options available to tweak how it runs, but all
|
||||||
@ -56,6 +54,7 @@ $ kaspad
|
|||||||
```
|
```
|
||||||
|
|
||||||
## Discord
|
## Discord
|
||||||
|
|
||||||
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
|
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
|
||||||
|
|
||||||
## Issue Tracker
|
## Issue Tracker
|
||||||
@ -63,6 +62,8 @@ Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
|
|||||||
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
|
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
|
||||||
is used for this project.
|
is used for this project.
|
||||||
|
|
||||||
|
Issue priorities may be seen at https://github.com/orgs/kaspanet/projects/4
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
The [documentation](https://github.com/kaspanet/docs) is a work-in-progress
|
The [documentation](https://github.com/kaspanet/docs) is a work-in-progress
|
||||||
|
10
app/app.go
10
app/app.go
@ -20,7 +20,10 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/version"
|
"github.com/kaspanet/kaspad/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
const leveldbCacheSizeMiB = 256
|
const (
|
||||||
|
leveldbCacheSizeMiB = 256
|
||||||
|
defaultDataDirname = "datadir2"
|
||||||
|
)
|
||||||
|
|
||||||
var desiredLimits = &limits.DesiredLimits{
|
var desiredLimits = &limits.DesiredLimits{
|
||||||
FileLimitWant: 2048,
|
FileLimitWant: 2048,
|
||||||
@ -84,6 +87,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
|||||||
if app.cfg.Profile != "" {
|
if app.cfg.Profile != "" {
|
||||||
profiling.Start(app.cfg.Profile, log)
|
profiling.Start(app.cfg.Profile, log)
|
||||||
}
|
}
|
||||||
|
profiling.TrackHeap(app.cfg.AppDir, log)
|
||||||
|
|
||||||
// Return now if an interrupt signal was triggered.
|
// Return now if an interrupt signal was triggered.
|
||||||
if signal.InterruptRequested(interrupt) {
|
if signal.InterruptRequested(interrupt) {
|
||||||
@ -101,7 +105,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
|||||||
// Open the database
|
// Open the database
|
||||||
databaseContext, err := openDB(app.cfg)
|
databaseContext, err := openDB(app.cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Errorf("Loading database failed: %+v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -159,7 +163,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
|||||||
|
|
||||||
// dbPath returns the path to the block database given a database type.
|
// dbPath returns the path to the block database given a database type.
|
||||||
func databasePath(cfg *config.Config) string {
|
func databasePath(cfg *config.Config) string {
|
||||||
return filepath.Join(cfg.AppDir, "data")
|
return filepath.Join(cfg.AppDir, defaultDataDirname)
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeDatabase(cfg *config.Config) error {
|
func removeDatabase(cfg *config.Config) error {
|
||||||
|
@ -6,7 +6,7 @@ supported kaspa messages to and from the appmessage. This package does not deal
|
|||||||
with the specifics of message handling such as what to do when a message is
|
with the specifics of message handling such as what to do when a message is
|
||||||
received. This provides the caller with a high level of flexibility.
|
received. This provides the caller with a high level of flexibility.
|
||||||
|
|
||||||
Kaspa Message Overview
|
# Kaspa Message Overview
|
||||||
|
|
||||||
The kaspa protocol consists of exchanging messages between peers. Each
|
The kaspa protocol consists of exchanging messages between peers. Each
|
||||||
message is preceded by a header which identifies information about it such as
|
message is preceded by a header which identifies information about it such as
|
||||||
@ -22,7 +22,7 @@ messages, all of the details of marshalling and unmarshalling to and from the
|
|||||||
appmessage using kaspa encoding are handled so the caller doesn't have to concern
|
appmessage using kaspa encoding are handled so the caller doesn't have to concern
|
||||||
themselves with the specifics.
|
themselves with the specifics.
|
||||||
|
|
||||||
Message Interaction
|
# Message Interaction
|
||||||
|
|
||||||
The following provides a quick summary of how the kaspa messages are intended
|
The following provides a quick summary of how the kaspa messages are intended
|
||||||
to interact with one another. As stated above, these interactions are not
|
to interact with one another. As stated above, these interactions are not
|
||||||
@ -45,13 +45,13 @@ interactions in no particular order.
|
|||||||
notfound message (MsgNotFound)
|
notfound message (MsgNotFound)
|
||||||
ping message (MsgPing) pong message (MsgPong)
|
ping message (MsgPing) pong message (MsgPong)
|
||||||
|
|
||||||
Common Parameters
|
# Common Parameters
|
||||||
|
|
||||||
There are several common parameters that arise when using this package to read
|
There are several common parameters that arise when using this package to read
|
||||||
and write kaspa messages. The following sections provide a quick overview of
|
and write kaspa messages. The following sections provide a quick overview of
|
||||||
these parameters so the next sections can build on them.
|
these parameters so the next sections can build on them.
|
||||||
|
|
||||||
Protocol Version
|
# Protocol Version
|
||||||
|
|
||||||
The protocol version should be negotiated with the remote peer at a higher
|
The protocol version should be negotiated with the remote peer at a higher
|
||||||
level than this package via the version (MsgVersion) message exchange, however,
|
level than this package via the version (MsgVersion) message exchange, however,
|
||||||
@ -60,18 +60,18 @@ latest protocol version this package supports and is typically the value to use
|
|||||||
for all outbound connections before a potentially lower protocol version is
|
for all outbound connections before a potentially lower protocol version is
|
||||||
negotiated.
|
negotiated.
|
||||||
|
|
||||||
Kaspa Network
|
# Kaspa Network
|
||||||
|
|
||||||
The kaspa network is a magic number which is used to identify the start of a
|
The kaspa network is a magic number which is used to identify the start of a
|
||||||
message and which kaspa network the message applies to. This package provides
|
message and which kaspa network the message applies to. This package provides
|
||||||
the following constants:
|
the following constants:
|
||||||
|
|
||||||
appmessage.Mainnet
|
appmessage.Mainnet
|
||||||
appmessage.Testnet (Test network)
|
appmessage.Testnet (Test network)
|
||||||
appmessage.Simnet (Simulation test network)
|
appmessage.Simnet (Simulation test network)
|
||||||
appmessage.Devnet (Development network)
|
appmessage.Devnet (Development network)
|
||||||
|
|
||||||
Determining Message Type
|
# Determining Message Type
|
||||||
|
|
||||||
As discussed in the kaspa message overview section, this package reads
|
As discussed in the kaspa message overview section, this package reads
|
||||||
and writes kaspa messages using a generic interface named Message. In
|
and writes kaspa messages using a generic interface named Message. In
|
||||||
@ -89,7 +89,7 @@ switch or type assertion. An example of a type switch follows:
|
|||||||
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
|
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
Reading Messages
|
# Reading Messages
|
||||||
|
|
||||||
In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
|
In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
|
||||||
function. It accepts any io.Reader, but typically this will be a net.Conn to
|
function. It accepts any io.Reader, but typically this will be a net.Conn to
|
||||||
@ -104,7 +104,7 @@ a remote node running a kaspa peer. Example syntax is:
|
|||||||
// Log and handle the error
|
// Log and handle the error
|
||||||
}
|
}
|
||||||
|
|
||||||
Writing Messages
|
# Writing Messages
|
||||||
|
|
||||||
In order to marshall kaspa messages to the appmessage, use the WriteMessage
|
In order to marshall kaspa messages to the appmessage, use the WriteMessage
|
||||||
function. It accepts any io.Writer, but typically this will be a net.Conn to
|
function. It accepts any io.Writer, but typically this will be a net.Conn to
|
||||||
@ -122,7 +122,7 @@ from a remote peer is:
|
|||||||
// Log and handle the error
|
// Log and handle the error
|
||||||
}
|
}
|
||||||
|
|
||||||
Errors
|
# Errors
|
||||||
|
|
||||||
Errors returned by this package are either the raw errors provided by underlying
|
Errors returned by this package are either the raw errors provided by underlying
|
||||||
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
|
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
|
||||||
|
@ -2,6 +2,10 @@ package appmessage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||||
@ -28,13 +32,17 @@ func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
|
|||||||
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
|
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
|
||||||
return &MsgBlockHeader{
|
return &MsgBlockHeader{
|
||||||
Version: domainBlockHeader.Version(),
|
Version: domainBlockHeader.Version(),
|
||||||
ParentHashes: domainBlockHeader.ParentHashes(),
|
Parents: domainBlockHeader.Parents(),
|
||||||
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
|
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
|
||||||
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
|
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
|
||||||
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
|
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
|
||||||
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
|
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
|
||||||
Bits: domainBlockHeader.Bits(),
|
Bits: domainBlockHeader.Bits(),
|
||||||
Nonce: domainBlockHeader.Nonce(),
|
Nonce: domainBlockHeader.Nonce(),
|
||||||
|
BlueScore: domainBlockHeader.BlueScore(),
|
||||||
|
DAAScore: domainBlockHeader.DAAScore(),
|
||||||
|
BlueWork: domainBlockHeader.BlueWork(),
|
||||||
|
PruningPoint: domainBlockHeader.PruningPoint(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -55,13 +63,17 @@ func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
|
|||||||
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
|
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
|
||||||
return blockheader.NewImmutableBlockHeader(
|
return blockheader.NewImmutableBlockHeader(
|
||||||
blockHeader.Version,
|
blockHeader.Version,
|
||||||
blockHeader.ParentHashes,
|
blockHeader.Parents,
|
||||||
blockHeader.HashMerkleRoot,
|
blockHeader.HashMerkleRoot,
|
||||||
blockHeader.AcceptedIDMerkleRoot,
|
blockHeader.AcceptedIDMerkleRoot,
|
||||||
blockHeader.UTXOCommitment,
|
blockHeader.UTXOCommitment,
|
||||||
blockHeader.Timestamp.UnixMilliseconds(),
|
blockHeader.Timestamp.UnixMilliseconds(),
|
||||||
blockHeader.Bits,
|
blockHeader.Bits,
|
||||||
blockHeader.Nonce,
|
blockHeader.Nonce,
|
||||||
|
blockHeader.DAAScore,
|
||||||
|
blockHeader.BlueScore,
|
||||||
|
blockHeader.BlueWork,
|
||||||
|
blockHeader.PruningPoint,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -100,6 +112,7 @@ func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTran
|
|||||||
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
|
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
|
||||||
SignatureScript: domainTransactionInput.SignatureScript,
|
SignatureScript: domainTransactionInput.SignatureScript,
|
||||||
Sequence: domainTransactionInput.Sequence,
|
Sequence: domainTransactionInput.Sequence,
|
||||||
|
SigOpCount: domainTransactionInput.SigOpCount,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -148,6 +161,7 @@ func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInpu
|
|||||||
return &externalapi.DomainTransactionInput{
|
return &externalapi.DomainTransactionInput{
|
||||||
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
|
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
|
||||||
SignatureScript: txIn.SignatureScript,
|
SignatureScript: txIn.SignatureScript,
|
||||||
|
SigOpCount: txIn.SigOpCount,
|
||||||
Sequence: txIn.Sequence,
|
Sequence: txIn.Sequence,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -163,14 +177,10 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
|
|||||||
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
|
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
|
||||||
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
|
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
|
||||||
for i, input := range rpcTransaction.Inputs {
|
for i, input := range rpcTransaction.Inputs {
|
||||||
transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
|
previousOutpoint, err := RPCOutpointToDomainOutpoint(input.PreviousOutpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
previousOutpoint := &externalapi.DomainOutpoint{
|
|
||||||
TransactionID: *transactionID,
|
|
||||||
Index: input.PreviousOutpoint.Index,
|
|
||||||
}
|
|
||||||
signatureScript, err := hex.DecodeString(input.SignatureScript)
|
signatureScript, err := hex.DecodeString(input.SignatureScript)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -179,6 +189,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
|||||||
PreviousOutpoint: *previousOutpoint,
|
PreviousOutpoint: *previousOutpoint,
|
||||||
SignatureScript: signatureScript,
|
SignatureScript: signatureScript,
|
||||||
Sequence: input.Sequence,
|
Sequence: input.Sequence,
|
||||||
|
SigOpCount: input.SigOpCount,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
|
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
|
||||||
@ -203,16 +214,47 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &externalapi.DomainTransaction{
|
return &externalapi.DomainTransaction{
|
||||||
Version: rpcTransaction.Version,
|
Version: rpcTransaction.Version,
|
||||||
Inputs: inputs,
|
Inputs: inputs,
|
||||||
Outputs: outputs,
|
Outputs: outputs,
|
||||||
LockTime: rpcTransaction.LockTime,
|
LockTime: rpcTransaction.LockTime,
|
||||||
SubnetworkID: *subnetworkID,
|
SubnetworkID: *subnetworkID,
|
||||||
Gas: rpcTransaction.LockTime,
|
Gas: rpcTransaction.Gas,
|
||||||
Payload: payload,
|
MassCommitment: rpcTransaction.Mass,
|
||||||
|
Payload: payload,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RPCOutpointToDomainOutpoint converts RPCOutpoint to DomainOutpoint
|
||||||
|
func RPCOutpointToDomainOutpoint(outpoint *RPCOutpoint) (*externalapi.DomainOutpoint, error) {
|
||||||
|
transactionID, err := transactionid.FromString(outpoint.TransactionID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &externalapi.DomainOutpoint{
|
||||||
|
TransactionID: *transactionID,
|
||||||
|
Index: outpoint.Index,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPCUTXOEntryToUTXOEntry converts RPCUTXOEntry to UTXOEntry
|
||||||
|
func RPCUTXOEntryToUTXOEntry(entry *RPCUTXOEntry) (externalapi.UTXOEntry, error) {
|
||||||
|
script, err := hex.DecodeString(entry.ScriptPublicKey.Script)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return utxo.NewUTXOEntry(
|
||||||
|
entry.Amount,
|
||||||
|
&externalapi.ScriptPublicKey{
|
||||||
|
Script: script,
|
||||||
|
Version: entry.ScriptPublicKey.Version,
|
||||||
|
},
|
||||||
|
entry.IsCoinbase,
|
||||||
|
entry.BlockDAAScore,
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
|
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
|
||||||
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
|
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
|
||||||
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
|
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
|
||||||
@ -227,6 +269,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
|||||||
PreviousOutpoint: previousOutpoint,
|
PreviousOutpoint: previousOutpoint,
|
||||||
SignatureScript: signatureScript,
|
SignatureScript: signatureScript,
|
||||||
Sequence: input.Sequence,
|
Sequence: input.Sequence,
|
||||||
|
SigOpCount: input.SigOpCount,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
|
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
|
||||||
@ -245,7 +288,8 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
|||||||
Outputs: outputs,
|
Outputs: outputs,
|
||||||
LockTime: transaction.LockTime,
|
LockTime: transaction.LockTime,
|
||||||
SubnetworkID: subnetworkID,
|
SubnetworkID: subnetworkID,
|
||||||
Gas: transaction.LockTime,
|
Gas: transaction.Gas,
|
||||||
|
Mass: transaction.MassCommitment,
|
||||||
Payload: payload,
|
Payload: payload,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -257,22 +301,27 @@ func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
|
|||||||
|
|
||||||
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
|
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
|
||||||
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
|
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
|
||||||
domainOutpointAndUTXOEntryPairs[i] = &externalapi.OutpointAndUTXOEntryPair{
|
domainOutpointAndUTXOEntryPairs[i] = outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(outpointAndUTXOEntryPair)
|
||||||
Outpoint: &externalapi.DomainOutpoint{
|
|
||||||
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
|
|
||||||
Index: outpointAndUTXOEntryPair.Outpoint.Index,
|
|
||||||
},
|
|
||||||
UTXOEntry: utxo.NewUTXOEntry(
|
|
||||||
outpointAndUTXOEntryPair.UTXOEntry.Amount,
|
|
||||||
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
|
|
||||||
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
|
|
||||||
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return domainOutpointAndUTXOEntryPairs
|
return domainOutpointAndUTXOEntryPairs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(
|
||||||
|
outpointAndUTXOEntryPair *OutpointAndUTXOEntryPair) *externalapi.OutpointAndUTXOEntryPair {
|
||||||
|
return &externalapi.OutpointAndUTXOEntryPair{
|
||||||
|
Outpoint: &externalapi.DomainOutpoint{
|
||||||
|
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
|
||||||
|
Index: outpointAndUTXOEntryPair.Outpoint.Index,
|
||||||
|
},
|
||||||
|
UTXOEntry: utxo.NewUTXOEntry(
|
||||||
|
outpointAndUTXOEntryPair.UTXOEntry.Amount,
|
||||||
|
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
|
||||||
|
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
|
||||||
|
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
|
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
|
||||||
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
|
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
|
||||||
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
||||||
@ -298,15 +347,25 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
|||||||
|
|
||||||
// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
|
// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
|
||||||
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
|
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
|
||||||
|
parents := make([]*RPCBlockLevelParents, len(block.Header.Parents()))
|
||||||
|
for i, blockLevelParents := range block.Header.Parents() {
|
||||||
|
parents[i] = &RPCBlockLevelParents{
|
||||||
|
ParentHashes: hashes.ToStrings(blockLevelParents),
|
||||||
|
}
|
||||||
|
}
|
||||||
header := &RPCBlockHeader{
|
header := &RPCBlockHeader{
|
||||||
Version: uint32(block.Header.Version()),
|
Version: uint32(block.Header.Version()),
|
||||||
ParentHashes: hashes.ToStrings(block.Header.ParentHashes()),
|
Parents: parents,
|
||||||
HashMerkleRoot: block.Header.HashMerkleRoot().String(),
|
HashMerkleRoot: block.Header.HashMerkleRoot().String(),
|
||||||
AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
|
AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
|
||||||
UTXOCommitment: block.Header.UTXOCommitment().String(),
|
UTXOCommitment: block.Header.UTXOCommitment().String(),
|
||||||
Timestamp: block.Header.TimeInMilliseconds(),
|
Timestamp: block.Header.TimeInMilliseconds(),
|
||||||
Bits: block.Header.Bits(),
|
Bits: block.Header.Bits(),
|
||||||
Nonce: block.Header.Nonce(),
|
Nonce: block.Header.Nonce(),
|
||||||
|
DAAScore: block.Header.DAAScore(),
|
||||||
|
BlueScore: block.Header.BlueScore(),
|
||||||
|
BlueWork: block.Header.BlueWork().Text(16),
|
||||||
|
PruningPoint: block.Header.PruningPoint().String(),
|
||||||
}
|
}
|
||||||
transactions := make([]*RPCTransaction, len(block.Transactions))
|
transactions := make([]*RPCTransaction, len(block.Transactions))
|
||||||
for i, transaction := range block.Transactions {
|
for i, transaction := range block.Transactions {
|
||||||
@ -320,13 +379,16 @@ func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
|
|||||||
|
|
||||||
// RPCBlockToDomainBlock converts `block` into a DomainBlock
|
// RPCBlockToDomainBlock converts `block` into a DomainBlock
|
||||||
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
||||||
parentHashes := make([]*externalapi.DomainHash, len(block.Header.ParentHashes))
|
parents := make([]externalapi.BlockLevelParents, len(block.Header.Parents))
|
||||||
for i, parentHash := range block.Header.ParentHashes {
|
for i, blockLevelParents := range block.Header.Parents {
|
||||||
domainParentHashes, err := externalapi.NewDomainHashFromString(parentHash)
|
parents[i] = make(externalapi.BlockLevelParents, len(blockLevelParents.ParentHashes))
|
||||||
if err != nil {
|
for j, parentHash := range blockLevelParents.ParentHashes {
|
||||||
return nil, err
|
var err error
|
||||||
|
parents[i][j], err = externalapi.NewDomainHashFromString(parentHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
parentHashes[i] = domainParentHashes
|
|
||||||
}
|
}
|
||||||
hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
|
hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -340,15 +402,27 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
blueWork, success := new(big.Int).SetString(block.Header.BlueWork, 16)
|
||||||
|
if !success {
|
||||||
|
return nil, errors.Errorf("failed to parse blue work: %s", block.Header.BlueWork)
|
||||||
|
}
|
||||||
|
pruningPoint, err := externalapi.NewDomainHashFromString(block.Header.PruningPoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
header := blockheader.NewImmutableBlockHeader(
|
header := blockheader.NewImmutableBlockHeader(
|
||||||
uint16(block.Header.Version),
|
uint16(block.Header.Version),
|
||||||
parentHashes,
|
parents,
|
||||||
hashMerkleRoot,
|
hashMerkleRoot,
|
||||||
acceptedIDMerkleRoot,
|
acceptedIDMerkleRoot,
|
||||||
utxoCommitment,
|
utxoCommitment,
|
||||||
block.Header.Timestamp,
|
block.Header.Timestamp,
|
||||||
block.Header.Bits,
|
block.Header.Bits,
|
||||||
block.Header.Nonce)
|
block.Header.Nonce,
|
||||||
|
block.Header.DAAScore,
|
||||||
|
block.Header.BlueScore,
|
||||||
|
blueWork,
|
||||||
|
pruningPoint)
|
||||||
transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
|
transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
|
||||||
for i, transaction := range block.Transactions {
|
for i, transaction := range block.Transactions {
|
||||||
domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
|
domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
|
||||||
@ -362,3 +436,169 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
|||||||
Transactions: transactions,
|
Transactions: transactions,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData
|
||||||
|
func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData {
|
||||||
|
daaWindow := make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow))
|
||||||
|
for i, daaBlock := range block.DAAWindow {
|
||||||
|
daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{
|
||||||
|
Header: BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header),
|
||||||
|
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
|
||||||
|
for i, datum := range block.GHOSTDAGData {
|
||||||
|
ghostdagData[i] = &externalapi.BlockGHOSTDAGDataHashPair{
|
||||||
|
Hash: datum.Hash,
|
||||||
|
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &externalapi.BlockWithTrustedData{
|
||||||
|
Block: MsgBlockToDomainBlock(block.Block),
|
||||||
|
DAAWindow: daaWindow,
|
||||||
|
GHOSTDAGData: ghostdagData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader
|
||||||
|
func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader {
|
||||||
|
return &externalapi.TrustedDataDataDAAHeader{
|
||||||
|
Header: BlockHeaderToDomainBlockHeader(daaBlock.Header),
|
||||||
|
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair
|
||||||
|
func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair {
|
||||||
|
return &externalapi.BlockGHOSTDAGDataHashPair{
|
||||||
|
Hash: datum.Hash,
|
||||||
|
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData {
|
||||||
|
bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes))
|
||||||
|
for _, pair := range data.BluesAnticoneSizes {
|
||||||
|
bluesAnticoneSizes[*pair.BlueHash] = pair.AnticoneSize
|
||||||
|
}
|
||||||
|
return externalapi.NewBlockGHOSTDAGData(
|
||||||
|
data.BlueScore,
|
||||||
|
data.BlueWork,
|
||||||
|
data.SelectedParent,
|
||||||
|
data.MergeSetBlues,
|
||||||
|
data.MergeSetReds,
|
||||||
|
bluesAnticoneSizes,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func domainGHOSTDAGDataGHOSTDAGData(data *externalapi.BlockGHOSTDAGData) *BlockGHOSTDAGData {
|
||||||
|
bluesAnticoneSizes := make([]*BluesAnticoneSizes, 0, len(data.BluesAnticoneSizes()))
|
||||||
|
for blueHash, anticoneSize := range data.BluesAnticoneSizes() {
|
||||||
|
blueHashCopy := blueHash
|
||||||
|
bluesAnticoneSizes = append(bluesAnticoneSizes, &BluesAnticoneSizes{
|
||||||
|
BlueHash: &blueHashCopy,
|
||||||
|
AnticoneSize: anticoneSize,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &BlockGHOSTDAGData{
|
||||||
|
BlueScore: data.BlueScore(),
|
||||||
|
BlueWork: data.BlueWork(),
|
||||||
|
SelectedParent: data.SelectedParent(),
|
||||||
|
MergeSetBlues: data.MergeSetBlues(),
|
||||||
|
MergeSetReds: data.MergeSetReds(),
|
||||||
|
BluesAnticoneSizes: bluesAnticoneSizes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DomainBlockWithTrustedDataToBlockWithTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
|
||||||
|
func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWithTrustedData) *MsgBlockWithTrustedData {
|
||||||
|
daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow))
|
||||||
|
for i, daaBlock := range block.DAAWindow {
|
||||||
|
daaWindow[i] = &TrustedDataDataDAABlock{
|
||||||
|
Block: &MsgBlock{
|
||||||
|
Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header),
|
||||||
|
},
|
||||||
|
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
|
||||||
|
for i, datum := range block.GHOSTDAGData {
|
||||||
|
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
|
||||||
|
Hash: datum.Hash,
|
||||||
|
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MsgBlockWithTrustedData{
|
||||||
|
Block: DomainBlockToMsgBlock(block.Block),
|
||||||
|
DAAScore: block.Block.Header.DAAScore(),
|
||||||
|
DAAWindow: daaWindow,
|
||||||
|
GHOSTDAGData: ghostdagData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices
|
||||||
|
// to *MsgBlockWithTrustedDataV4
|
||||||
|
func DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 {
|
||||||
|
return &MsgBlockWithTrustedDataV4{
|
||||||
|
Block: DomainBlockToMsgBlock(block),
|
||||||
|
DAAWindowIndices: daaWindowIndices,
|
||||||
|
GHOSTDAGDataIndices: ghostdagDataIndices,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DomainTrustedDataToTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
|
||||||
|
func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData {
|
||||||
|
daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow))
|
||||||
|
for i, daaBlock := range domainDAAWindow {
|
||||||
|
daaWindow[i] = &TrustedDataDAAHeader{
|
||||||
|
Header: DomainBlockHeaderToBlockHeader(daaBlock.Header),
|
||||||
|
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData))
|
||||||
|
for i, datum := range domainGHOSTDAGData {
|
||||||
|
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
|
||||||
|
Hash: datum.Hash,
|
||||||
|
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MsgTrustedData{
|
||||||
|
DAAWindow: daaWindow,
|
||||||
|
GHOSTDAGData: ghostdagData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MsgPruningPointProofToDomainPruningPointProof converts *MsgPruningPointProof to *externalapi.PruningPointProof
|
||||||
|
func MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage *MsgPruningPointProof) *externalapi.PruningPointProof {
|
||||||
|
headers := make([][]externalapi.BlockHeader, len(pruningPointProofMessage.Headers))
|
||||||
|
for blockLevel, blockLevelParents := range pruningPointProofMessage.Headers {
|
||||||
|
headers[blockLevel] = make([]externalapi.BlockHeader, len(blockLevelParents))
|
||||||
|
for i, header := range blockLevelParents {
|
||||||
|
headers[blockLevel][i] = BlockHeaderToDomainBlockHeader(header)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &externalapi.PruningPointProof{
|
||||||
|
Headers: headers,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DomainPruningPointProofToMsgPruningPointProof converts *externalapi.PruningPointProof to *MsgPruningPointProof
|
||||||
|
func DomainPruningPointProofToMsgPruningPointProof(pruningPointProof *externalapi.PruningPointProof) *MsgPruningPointProof {
|
||||||
|
headers := make([][]*MsgBlockHeader, len(pruningPointProof.Headers))
|
||||||
|
for blockLevel, blockLevelParents := range pruningPointProof.Headers {
|
||||||
|
headers[blockLevel] = make([]*MsgBlockHeader, len(blockLevelParents))
|
||||||
|
for i, header := range blockLevelParents {
|
||||||
|
headers[blockLevel][i] = DomainBlockHeaderToBlockHeader(header)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &MsgPruningPointProof{
|
||||||
|
Headers: headers,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -38,6 +38,10 @@ type RPCError struct {
|
|||||||
Message string
|
Message string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (err RPCError) Error() string {
|
||||||
|
return err.Message
|
||||||
|
}
|
||||||
|
|
||||||
// RPCErrorf formats according to a format specifier and returns the string
|
// RPCErrorf formats according to a format specifier and returns the string
|
||||||
// as an RPCError.
|
// as an RPCError.
|
||||||
func RPCErrorf(format string, args ...interface{}) *RPCError {
|
func RPCErrorf(format string, args ...interface{}) *RPCError {
|
||||||
|
@ -45,24 +45,34 @@ const (
|
|||||||
CmdRequestRelayBlocks
|
CmdRequestRelayBlocks
|
||||||
CmdInvTransaction
|
CmdInvTransaction
|
||||||
CmdRequestTransactions
|
CmdRequestTransactions
|
||||||
CmdIBDBlock
|
|
||||||
CmdDoneHeaders
|
CmdDoneHeaders
|
||||||
CmdTransactionNotFound
|
CmdTransactionNotFound
|
||||||
CmdReject
|
CmdReject
|
||||||
CmdHeader
|
|
||||||
CmdRequestNextHeaders
|
CmdRequestNextHeaders
|
||||||
CmdRequestPruningPointUTXOSetAndBlock
|
CmdRequestPruningPointUTXOSet
|
||||||
CmdPruningPointUTXOSetChunk
|
CmdPruningPointUTXOSetChunk
|
||||||
CmdRequestIBDBlocks
|
|
||||||
CmdUnexpectedPruningPoint
|
CmdUnexpectedPruningPoint
|
||||||
CmdRequestPruningPointHash
|
|
||||||
CmdPruningPointHash
|
|
||||||
CmdIBDBlockLocator
|
CmdIBDBlockLocator
|
||||||
CmdIBDBlockLocatorHighestHash
|
CmdIBDBlockLocatorHighestHash
|
||||||
CmdIBDBlockLocatorHighestHashNotFound
|
CmdIBDBlockLocatorHighestHashNotFound
|
||||||
CmdBlockHeaders
|
CmdBlockHeaders
|
||||||
CmdRequestNextPruningPointUTXOSetChunk
|
CmdRequestNextPruningPointUTXOSetChunk
|
||||||
CmdDonePruningPointUTXOSetChunks
|
CmdDonePruningPointUTXOSetChunks
|
||||||
|
CmdBlockWithTrustedData
|
||||||
|
CmdDoneBlocksWithTrustedData
|
||||||
|
CmdRequestPruningPointAndItsAnticone
|
||||||
|
CmdIBDBlock
|
||||||
|
CmdRequestIBDBlocks
|
||||||
|
CmdPruningPoints
|
||||||
|
CmdRequestPruningPointProof
|
||||||
|
CmdPruningPointProof
|
||||||
|
CmdReady
|
||||||
|
CmdTrustedData
|
||||||
|
CmdBlockWithTrustedDataV4
|
||||||
|
CmdRequestNextPruningPointAndItsAnticoneBlocks
|
||||||
|
CmdRequestIBDChainBlockLocator
|
||||||
|
CmdIBDChainBlockLocator
|
||||||
|
CmdRequestAnticone
|
||||||
|
|
||||||
// rpc
|
// rpc
|
||||||
CmdGetCurrentNetworkRequestMessage
|
CmdGetCurrentNetworkRequestMessage
|
||||||
@ -121,6 +131,8 @@ const (
|
|||||||
CmdStopNotifyingUTXOsChangedResponseMessage
|
CmdStopNotifyingUTXOsChangedResponseMessage
|
||||||
CmdGetUTXOsByAddressesRequestMessage
|
CmdGetUTXOsByAddressesRequestMessage
|
||||||
CmdGetUTXOsByAddressesResponseMessage
|
CmdGetUTXOsByAddressesResponseMessage
|
||||||
|
CmdGetBalanceByAddressRequestMessage
|
||||||
|
CmdGetBalanceByAddressResponseMessage
|
||||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage
|
CmdGetVirtualSelectedParentBlueScoreRequestMessage
|
||||||
CmdGetVirtualSelectedParentBlueScoreResponseMessage
|
CmdGetVirtualSelectedParentBlueScoreResponseMessage
|
||||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
|
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
|
||||||
@ -137,9 +149,24 @@ const (
|
|||||||
CmdPruningPointUTXOSetOverrideNotificationMessage
|
CmdPruningPointUTXOSetOverrideNotificationMessage
|
||||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
|
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
|
||||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
|
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
|
||||||
|
CmdEstimateNetworkHashesPerSecondRequestMessage
|
||||||
|
CmdEstimateNetworkHashesPerSecondResponseMessage
|
||||||
CmdNotifyVirtualDaaScoreChangedRequestMessage
|
CmdNotifyVirtualDaaScoreChangedRequestMessage
|
||||||
CmdNotifyVirtualDaaScoreChangedResponseMessage
|
CmdNotifyVirtualDaaScoreChangedResponseMessage
|
||||||
CmdVirtualDaaScoreChangedNotificationMessage
|
CmdVirtualDaaScoreChangedNotificationMessage
|
||||||
|
CmdGetBalancesByAddressesRequestMessage
|
||||||
|
CmdGetBalancesByAddressesResponseMessage
|
||||||
|
CmdNotifyNewBlockTemplateRequestMessage
|
||||||
|
CmdNotifyNewBlockTemplateResponseMessage
|
||||||
|
CmdNewBlockTemplateNotificationMessage
|
||||||
|
CmdGetMempoolEntriesByAddressesRequestMessage
|
||||||
|
CmdGetMempoolEntriesByAddressesResponseMessage
|
||||||
|
CmdGetCoinSupplyRequestMessage
|
||||||
|
CmdGetCoinSupplyResponseMessage
|
||||||
|
CmdGetFeeEstimateRequestMessage
|
||||||
|
CmdGetFeeEstimateResponseMessage
|
||||||
|
CmdSubmitTransactionReplacementRequestMessage
|
||||||
|
CmdSubmitTransactionReplacementResponseMessage
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
||||||
@ -148,7 +175,7 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdVerAck: "VerAck",
|
CmdVerAck: "VerAck",
|
||||||
CmdRequestAddresses: "RequestAddresses",
|
CmdRequestAddresses: "RequestAddresses",
|
||||||
CmdAddresses: "Addresses",
|
CmdAddresses: "Addresses",
|
||||||
CmdRequestHeaders: "RequestHeaders",
|
CmdRequestHeaders: "CmdRequestHeaders",
|
||||||
CmdBlock: "Block",
|
CmdBlock: "Block",
|
||||||
CmdTx: "Tx",
|
CmdTx: "Tx",
|
||||||
CmdPing: "Ping",
|
CmdPing: "Ping",
|
||||||
@ -159,24 +186,34 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdRequestRelayBlocks: "RequestRelayBlocks",
|
CmdRequestRelayBlocks: "RequestRelayBlocks",
|
||||||
CmdInvTransaction: "InvTransaction",
|
CmdInvTransaction: "InvTransaction",
|
||||||
CmdRequestTransactions: "RequestTransactions",
|
CmdRequestTransactions: "RequestTransactions",
|
||||||
CmdIBDBlock: "IBDBlock",
|
|
||||||
CmdDoneHeaders: "DoneHeaders",
|
CmdDoneHeaders: "DoneHeaders",
|
||||||
CmdTransactionNotFound: "TransactionNotFound",
|
CmdTransactionNotFound: "TransactionNotFound",
|
||||||
CmdReject: "Reject",
|
CmdReject: "Reject",
|
||||||
CmdHeader: "Header",
|
|
||||||
CmdRequestNextHeaders: "RequestNextHeaders",
|
CmdRequestNextHeaders: "RequestNextHeaders",
|
||||||
CmdRequestPruningPointUTXOSetAndBlock: "RequestPruningPointUTXOSetAndBlock",
|
CmdRequestPruningPointUTXOSet: "RequestPruningPointUTXOSet",
|
||||||
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
|
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
|
||||||
CmdRequestIBDBlocks: "RequestIBDBlocks",
|
|
||||||
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
|
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
|
||||||
CmdRequestPruningPointHash: "RequestPruningPointHashHash",
|
|
||||||
CmdPruningPointHash: "PruningPointHash",
|
|
||||||
CmdIBDBlockLocator: "IBDBlockLocator",
|
CmdIBDBlockLocator: "IBDBlockLocator",
|
||||||
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
|
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
|
||||||
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
|
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
|
||||||
CmdBlockHeaders: "BlockHeaders",
|
CmdBlockHeaders: "BlockHeaders",
|
||||||
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
|
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
|
||||||
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
|
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
|
||||||
|
CmdBlockWithTrustedData: "BlockWithTrustedData",
|
||||||
|
CmdDoneBlocksWithTrustedData: "DoneBlocksWithTrustedData",
|
||||||
|
CmdRequestPruningPointAndItsAnticone: "RequestPruningPointAndItsAnticoneHeaders",
|
||||||
|
CmdIBDBlock: "IBDBlock",
|
||||||
|
CmdRequestIBDBlocks: "RequestIBDBlocks",
|
||||||
|
CmdPruningPoints: "PruningPoints",
|
||||||
|
CmdRequestPruningPointProof: "RequestPruningPointProof",
|
||||||
|
CmdPruningPointProof: "PruningPointProof",
|
||||||
|
CmdReady: "Ready",
|
||||||
|
CmdTrustedData: "TrustedData",
|
||||||
|
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
|
||||||
|
CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
|
||||||
|
CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator",
|
||||||
|
CmdIBDChainBlockLocator: "IBDChainBlockLocator",
|
||||||
|
CmdRequestAnticone: "RequestAnticone",
|
||||||
}
|
}
|
||||||
|
|
||||||
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
||||||
@ -235,6 +272,8 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
|
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
|
||||||
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
|
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
|
||||||
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
|
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
|
||||||
|
CmdGetBalanceByAddressRequestMessage: "GetBalanceByAddressRequest",
|
||||||
|
CmdGetBalanceByAddressResponseMessage: "GetBalancesByAddressResponse",
|
||||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
|
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
|
||||||
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
|
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
|
||||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
|
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
|
||||||
@ -251,9 +290,24 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
|||||||
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
|
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
|
||||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
|
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
|
||||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
|
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
|
||||||
|
CmdEstimateNetworkHashesPerSecondRequestMessage: "EstimateNetworkHashesPerSecondRequest",
|
||||||
|
CmdEstimateNetworkHashesPerSecondResponseMessage: "EstimateNetworkHashesPerSecondResponse",
|
||||||
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
|
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
|
||||||
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
|
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
|
||||||
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
|
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
|
||||||
|
CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest",
|
||||||
|
CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
|
||||||
|
CmdNotifyNewBlockTemplateRequestMessage: "NotifyNewBlockTemplateRequest",
|
||||||
|
CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse",
|
||||||
|
CmdNewBlockTemplateNotificationMessage: "NewBlockTemplateNotification",
|
||||||
|
CmdGetMempoolEntriesByAddressesRequestMessage: "GetMempoolEntriesByAddressesRequest",
|
||||||
|
CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
|
||||||
|
CmdGetCoinSupplyRequestMessage: "GetCoinSupplyRequest",
|
||||||
|
CmdGetCoinSupplyResponseMessage: "GetCoinSupplyResponse",
|
||||||
|
CmdGetFeeEstimateRequestMessage: "GetFeeEstimateRequest",
|
||||||
|
CmdGetFeeEstimateResponseMessage: "GetFeeEstimateResponse",
|
||||||
|
CmdSubmitTransactionReplacementRequestMessage: "SubmitTransactionReplacementRequest",
|
||||||
|
CmdSubmitTransactionReplacementResponseMessage: "SubmitTransactionReplacementResponse",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Message is an interface that describes a kaspa message. A type that
|
// Message is an interface that describes a kaspa message. A type that
|
||||||
|
@ -18,16 +18,21 @@ import (
|
|||||||
|
|
||||||
// TestBlock tests the MsgBlock API.
|
// TestBlock tests the MsgBlock API.
|
||||||
func TestBlock(t *testing.T) {
|
func TestBlock(t *testing.T) {
|
||||||
pver := ProtocolVersion
|
pver := uint32(4)
|
||||||
|
|
||||||
// Block 1 header.
|
// Block 1 header.
|
||||||
parentHashes := blockOne.Header.ParentHashes
|
parents := blockOne.Header.Parents
|
||||||
hashMerkleRoot := blockOne.Header.HashMerkleRoot
|
hashMerkleRoot := blockOne.Header.HashMerkleRoot
|
||||||
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
|
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
|
||||||
utxoCommitment := blockOne.Header.UTXOCommitment
|
utxoCommitment := blockOne.Header.UTXOCommitment
|
||||||
bits := blockOne.Header.Bits
|
bits := blockOne.Header.Bits
|
||||||
nonce := blockOne.Header.Nonce
|
nonce := blockOne.Header.Nonce
|
||||||
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
|
daaScore := blockOne.Header.DAAScore
|
||||||
|
blueScore := blockOne.Header.BlueScore
|
||||||
|
blueWork := blockOne.Header.BlueWork
|
||||||
|
pruningPoint := blockOne.Header.PruningPoint
|
||||||
|
bh := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce,
|
||||||
|
daaScore, blueScore, blueWork, pruningPoint)
|
||||||
|
|
||||||
// Ensure the command is expected value.
|
// Ensure the command is expected value.
|
||||||
wantCmd := MessageCommand(5)
|
wantCmd := MessageCommand(5)
|
||||||
@ -127,11 +132,11 @@ func TestConvertToPartial(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//blockOne is the first block in the mainnet block DAG.
|
// blockOne is the first block in the mainnet block DAG.
|
||||||
var blockOne = MsgBlock{
|
var blockOne = MsgBlock{
|
||||||
Header: MsgBlockHeader{
|
Header: MsgBlockHeader{
|
||||||
Version: 0,
|
Version: 0,
|
||||||
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
|
Parents: []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}},
|
||||||
HashMerkleRoot: mainnetGenesisMerkleRoot,
|
HashMerkleRoot: mainnetGenesisMerkleRoot,
|
||||||
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
|
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
|
||||||
UTXOCommitment: exampleUTXOCommitment,
|
UTXOCommitment: exampleUTXOCommitment,
|
||||||
|
@ -5,13 +5,12 @@
|
|||||||
package appmessage
|
package appmessage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"math/big"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/util/mstime"
|
"github.com/kaspanet/kaspad/util/mstime"
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
|
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
|
||||||
@ -39,8 +38,8 @@ type MsgBlockHeader struct {
|
|||||||
// Version of the block. This is not the same as the protocol version.
|
// Version of the block. This is not the same as the protocol version.
|
||||||
Version uint16
|
Version uint16
|
||||||
|
|
||||||
// Hashes of the parent block headers in the blockDAG.
|
// Parents are the parent block hashes of the block in the DAG per superblock level.
|
||||||
ParentHashes []*externalapi.DomainHash
|
Parents []externalapi.BlockLevelParents
|
||||||
|
|
||||||
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
|
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
|
||||||
HashMerkleRoot *externalapi.DomainHash
|
HashMerkleRoot *externalapi.DomainHash
|
||||||
@ -60,15 +59,16 @@ type MsgBlockHeader struct {
|
|||||||
|
|
||||||
// Nonce used to generate the block.
|
// Nonce used to generate the block.
|
||||||
Nonce uint64
|
Nonce uint64
|
||||||
}
|
|
||||||
|
|
||||||
// NumParentBlocks return the number of entries in ParentHashes
|
// DAASCore is the DAA score of the block.
|
||||||
func (h *MsgBlockHeader) NumParentBlocks() byte {
|
DAAScore uint64
|
||||||
numParents := len(h.ParentHashes)
|
|
||||||
if numParents > math.MaxUint8 {
|
BlueScore uint64
|
||||||
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
|
|
||||||
}
|
// BlueWork is the blue work of the block.
|
||||||
return byte(numParents)
|
BlueWork *big.Int
|
||||||
|
|
||||||
|
PruningPoint *externalapi.DomainHash
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockHash computes the block identifier hash for the given block header.
|
// BlockHash computes the block identifier hash for the given block header.
|
||||||
@ -76,33 +76,27 @@ func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
|
|||||||
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
|
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsGenesis returns true iff this block is a genesis block
|
|
||||||
func (h *MsgBlockHeader) IsGenesis() bool {
|
|
||||||
return h.NumParentBlocks() == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Command returns the protocol command string for the message. This is part
|
|
||||||
// of the Message interface implementation.
|
|
||||||
func (h *MsgBlockHeader) Command() MessageCommand {
|
|
||||||
return CmdHeader
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
|
// NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
|
||||||
// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
|
// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
|
||||||
// block with defaults or calclulated values for the remaining fields.
|
// block with defaults or calclulated values for the remaining fields.
|
||||||
func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
|
func NewBlockHeader(version uint16, parents []externalapi.BlockLevelParents, hashMerkleRoot *externalapi.DomainHash,
|
||||||
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
|
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce,
|
||||||
|
daaScore, blueScore uint64, blueWork *big.Int, pruningPoint *externalapi.DomainHash) *MsgBlockHeader {
|
||||||
|
|
||||||
// Limit the timestamp to one millisecond precision since the protocol
|
// Limit the timestamp to one millisecond precision since the protocol
|
||||||
// doesn't support better.
|
// doesn't support better.
|
||||||
return &MsgBlockHeader{
|
return &MsgBlockHeader{
|
||||||
Version: version,
|
Version: version,
|
||||||
ParentHashes: parentHashes,
|
Parents: parents,
|
||||||
HashMerkleRoot: hashMerkleRoot,
|
HashMerkleRoot: hashMerkleRoot,
|
||||||
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
|
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
|
||||||
UTXOCommitment: utxoCommitment,
|
UTXOCommitment: utxoCommitment,
|
||||||
Timestamp: mstime.Now(),
|
Timestamp: mstime.Now(),
|
||||||
Bits: bits,
|
Bits: bits,
|
||||||
Nonce: nonce,
|
Nonce: nonce,
|
||||||
|
DAAScore: daaScore,
|
||||||
|
BlueScore: blueScore,
|
||||||
|
BlueWork: blueWork,
|
||||||
|
PruningPoint: pruningPoint,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -5,29 +5,34 @@
|
|||||||
package appmessage
|
package appmessage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math/big"
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
"github.com/davecgh/go-spew/spew"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/util/mstime"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestBlockHeader tests the MsgBlockHeader API.
|
// TestBlockHeader tests the MsgBlockHeader API.
|
||||||
func TestBlockHeader(t *testing.T) {
|
func TestBlockHeader(t *testing.T) {
|
||||||
nonce := uint64(0xba4d87a69924a93d)
|
nonce := uint64(0xba4d87a69924a93d)
|
||||||
|
|
||||||
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
|
parents := []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}}
|
||||||
|
|
||||||
merkleHash := mainnetGenesisMerkleRoot
|
merkleHash := mainnetGenesisMerkleRoot
|
||||||
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
|
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
|
||||||
bits := uint32(0x1d00ffff)
|
bits := uint32(0x1d00ffff)
|
||||||
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
|
daaScore := uint64(123)
|
||||||
|
blueScore := uint64(456)
|
||||||
|
blueWork := big.NewInt(789)
|
||||||
|
pruningPoint := simnetGenesisHash
|
||||||
|
bh := NewBlockHeader(1, parents, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce,
|
||||||
|
daaScore, blueScore, blueWork, pruningPoint)
|
||||||
|
|
||||||
// Ensure we get the same data back out.
|
// Ensure we get the same data back out.
|
||||||
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
|
if !reflect.DeepEqual(bh.Parents, parents) {
|
||||||
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
|
t.Errorf("NewBlockHeader: wrong parents - got %v, want %v",
|
||||||
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
|
spew.Sprint(bh.Parents), spew.Sprint(parents))
|
||||||
}
|
}
|
||||||
if bh.HashMerkleRoot != merkleHash {
|
if bh.HashMerkleRoot != merkleHash {
|
||||||
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
|
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
|
||||||
@ -41,44 +46,20 @@ func TestBlockHeader(t *testing.T) {
|
|||||||
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
|
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
|
||||||
bh.Nonce, nonce)
|
bh.Nonce, nonce)
|
||||||
}
|
}
|
||||||
}
|
if bh.DAAScore != daaScore {
|
||||||
|
t.Errorf("NewBlockHeader: wrong daaScore - got %v, want %v",
|
||||||
func TestIsGenesis(t *testing.T) {
|
bh.DAAScore, daaScore)
|
||||||
nonce := uint64(123123) // 0x1e0f3
|
|
||||||
bits := uint32(0x1d00ffff)
|
|
||||||
timestamp := mstime.UnixMilliseconds(0x495fab29000)
|
|
||||||
|
|
||||||
baseBlockHdr := &MsgBlockHeader{
|
|
||||||
Version: 1,
|
|
||||||
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
|
|
||||||
HashMerkleRoot: mainnetGenesisMerkleRoot,
|
|
||||||
Timestamp: timestamp,
|
|
||||||
Bits: bits,
|
|
||||||
Nonce: nonce,
|
|
||||||
}
|
}
|
||||||
genesisBlockHdr := &MsgBlockHeader{
|
if bh.BlueScore != blueScore {
|
||||||
Version: 1,
|
t.Errorf("NewBlockHeader: wrong blueScore - got %v, want %v",
|
||||||
ParentHashes: []*externalapi.DomainHash{},
|
bh.BlueScore, blueScore)
|
||||||
HashMerkleRoot: mainnetGenesisMerkleRoot,
|
|
||||||
Timestamp: timestamp,
|
|
||||||
Bits: bits,
|
|
||||||
Nonce: nonce,
|
|
||||||
}
|
}
|
||||||
|
if bh.BlueWork != blueWork {
|
||||||
tests := []struct {
|
t.Errorf("NewBlockHeader: wrong blueWork - got %v, want %v",
|
||||||
in *MsgBlockHeader // Block header to encode
|
bh.BlueWork, blueWork)
|
||||||
isGenesis bool // Expected result for call of .IsGenesis
|
|
||||||
}{
|
|
||||||
{genesisBlockHdr, true},
|
|
||||||
{baseBlockHdr, false},
|
|
||||||
}
|
}
|
||||||
|
if !bh.PruningPoint.Equal(pruningPoint) {
|
||||||
t.Logf("Running %d tests", len(tests))
|
t.Errorf("NewBlockHeader: wrong pruningPoint - got %v, want %v",
|
||||||
for i, test := range tests {
|
bh.PruningPoint, pruningPoint)
|
||||||
isGenesis := test.in.IsGenesis()
|
|
||||||
if isGenesis != test.isGenesis {
|
|
||||||
t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
|
|
||||||
i, isGenesis, test.isGenesis)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
54
app/appmessage/p2p_msgblockwithtrusteddata.go
Normal file
54
app/appmessage/p2p_msgblockwithtrusteddata.go
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgBlockWithTrustedData represents a kaspa BlockWithTrustedData message
|
||||||
|
type MsgBlockWithTrustedData struct {
|
||||||
|
baseMessage
|
||||||
|
|
||||||
|
Block *MsgBlock
|
||||||
|
DAAScore uint64
|
||||||
|
DAAWindow []*TrustedDataDataDAABlock
|
||||||
|
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgBlockWithTrustedData) Command() MessageCommand {
|
||||||
|
return CmdBlockWithTrustedData
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgBlockWithTrustedData returns a new MsgBlockWithTrustedData.
|
||||||
|
func NewMsgBlockWithTrustedData() *MsgBlockWithTrustedData {
|
||||||
|
return &MsgBlockWithTrustedData{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrustedDataDataDAABlock is an appmessage representation of externalapi.TrustedDataDataDAABlock
|
||||||
|
type TrustedDataDataDAABlock struct {
|
||||||
|
Block *MsgBlock
|
||||||
|
GHOSTDAGData *BlockGHOSTDAGData
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockGHOSTDAGData is an appmessage representation of externalapi.BlockGHOSTDAGData
|
||||||
|
type BlockGHOSTDAGData struct {
|
||||||
|
BlueScore uint64
|
||||||
|
BlueWork *big.Int
|
||||||
|
SelectedParent *externalapi.DomainHash
|
||||||
|
MergeSetBlues []*externalapi.DomainHash
|
||||||
|
MergeSetReds []*externalapi.DomainHash
|
||||||
|
BluesAnticoneSizes []*BluesAnticoneSizes
|
||||||
|
}
|
||||||
|
|
||||||
|
// BluesAnticoneSizes is an appmessage representation of the BluesAnticoneSizes part of GHOSTDAG data.
|
||||||
|
type BluesAnticoneSizes struct {
|
||||||
|
BlueHash *externalapi.DomainHash
|
||||||
|
AnticoneSize externalapi.KType
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockGHOSTDAGDataHashPair is an appmessage representation of externalapi.BlockGHOSTDAGDataHashPair
|
||||||
|
type BlockGHOSTDAGDataHashPair struct {
|
||||||
|
Hash *externalapi.DomainHash
|
||||||
|
GHOSTDAGData *BlockGHOSTDAGData
|
||||||
|
}
|
20
app/appmessage/p2p_msgblockwithtrusteddatav4.go
Normal file
20
app/appmessage/p2p_msgblockwithtrusteddatav4.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgBlockWithTrustedDataV4 represents a kaspa BlockWithTrustedDataV4 message
|
||||||
|
type MsgBlockWithTrustedDataV4 struct {
|
||||||
|
baseMessage
|
||||||
|
|
||||||
|
Block *MsgBlock
|
||||||
|
DAAWindowIndices []uint64
|
||||||
|
GHOSTDAGDataIndices []uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand {
|
||||||
|
return CmdBlockWithTrustedDataV4
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4.
|
||||||
|
func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
|
||||||
|
return &MsgBlockWithTrustedDataV4{}
|
||||||
|
}
|
21
app/appmessage/p2p_msgdoneblockswithmetadata.go
Normal file
21
app/appmessage/p2p_msgdoneblockswithmetadata.go
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgDoneBlocksWithTrustedData implements the Message interface and represents a kaspa
|
||||||
|
// DoneBlocksWithTrustedData message
|
||||||
|
//
|
||||||
|
// This message has no payload.
|
||||||
|
type MsgDoneBlocksWithTrustedData struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgDoneBlocksWithTrustedData) Command() MessageCommand {
|
||||||
|
return CmdDoneBlocksWithTrustedData
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgDoneBlocksWithTrustedData returns a new kaspa DoneBlocksWithTrustedData message that conforms to the
|
||||||
|
// Message interface.
|
||||||
|
func NewMsgDoneBlocksWithTrustedData() *MsgDoneBlocksWithTrustedData {
|
||||||
|
return &MsgDoneBlocksWithTrustedData{}
|
||||||
|
}
|
@ -0,0 +1,16 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgRequestPruningPointAndItsAnticone represents a kaspa RequestPruningPointAndItsAnticone message
|
||||||
|
type MsgRequestPruningPointAndItsAnticone struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgRequestPruningPointAndItsAnticone) Command() MessageCommand {
|
||||||
|
return CmdRequestPruningPointAndItsAnticone
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestPruningPointAndItsAnticone returns a new MsgRequestPruningPointAndItsAnticone.
|
||||||
|
func NewMsgRequestPruningPointAndItsAnticone() *MsgRequestPruningPointAndItsAnticone {
|
||||||
|
return &MsgRequestPruningPointAndItsAnticone{}
|
||||||
|
}
|
@ -1,65 +0,0 @@
|
|||||||
// Copyright (c) 2013-2016 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package appmessage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIBDBlock tests the MsgIBDBlock API.
|
|
||||||
func TestIBDBlock(t *testing.T) {
|
|
||||||
pver := ProtocolVersion
|
|
||||||
|
|
||||||
// Block 1 header.
|
|
||||||
parentHashes := blockOne.Header.ParentHashes
|
|
||||||
hashMerkleRoot := blockOne.Header.HashMerkleRoot
|
|
||||||
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
|
|
||||||
utxoCommitment := blockOne.Header.UTXOCommitment
|
|
||||||
bits := blockOne.Header.Bits
|
|
||||||
nonce := blockOne.Header.Nonce
|
|
||||||
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
|
|
||||||
|
|
||||||
// Ensure the command is expected value.
|
|
||||||
wantCmd := MessageCommand(15)
|
|
||||||
msg := NewMsgIBDBlock(NewMsgBlock(bh))
|
|
||||||
if cmd := msg.Command(); cmd != wantCmd {
|
|
||||||
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
|
|
||||||
cmd, wantCmd)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure max payload is expected value for latest protocol version.
|
|
||||||
wantPayload := uint32(1024 * 1024 * 32)
|
|
||||||
maxPayload := msg.MaxPayloadLength(pver)
|
|
||||||
if maxPayload != wantPayload {
|
|
||||||
t.Errorf("MaxPayloadLength: wrong max payload length for "+
|
|
||||||
"protocol version %d - got %v, want %v", pver,
|
|
||||||
maxPayload, wantPayload)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure we get the same block header data back out.
|
|
||||||
if !reflect.DeepEqual(&msg.Header, bh) {
|
|
||||||
t.Errorf("NewMsgIBDBlock: wrong block header - got %v, want %v",
|
|
||||||
spew.Sdump(&msg.Header), spew.Sdump(bh))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure transactions are added properly.
|
|
||||||
tx := blockOne.Transactions[0].Copy()
|
|
||||||
msg.AddTransaction(tx)
|
|
||||||
if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
|
|
||||||
t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
|
|
||||||
spew.Sdump(msg.Transactions),
|
|
||||||
spew.Sdump(blockOne.Transactions))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure transactions are properly cleared.
|
|
||||||
msg.ClearTransactions()
|
|
||||||
if len(msg.Transactions) != 0 {
|
|
||||||
t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
|
|
||||||
len(msg.Transactions), 0)
|
|
||||||
}
|
|
||||||
}
|
|
27
app/appmessage/p2p_msgibdchainblocklocator.go
Normal file
27
app/appmessage/p2p_msgibdchainblocklocator.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
|
||||||
|
// locator message. It is used to find the blockLocator of a peer that is
|
||||||
|
// syncing with you.
|
||||||
|
type MsgIBDChainBlockLocator struct {
|
||||||
|
baseMessage
|
||||||
|
BlockLocatorHashes []*externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
|
||||||
|
return CmdIBDChainBlockLocator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
|
||||||
|
// the Message interface. See MsgBlockLocator for details.
|
||||||
|
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
|
||||||
|
return &MsgIBDChainBlockLocator{
|
||||||
|
BlockLocatorHashes: locatorHashes,
|
||||||
|
}
|
||||||
|
}
|
@ -1,23 +0,0 @@
|
|||||||
package appmessage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MsgPruningPointHashMessage represents a kaspa PruningPointHash message
|
|
||||||
type MsgPruningPointHashMessage struct {
|
|
||||||
baseMessage
|
|
||||||
Hash *externalapi.DomainHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
|
||||||
func (msg *MsgPruningPointHashMessage) Command() MessageCommand {
|
|
||||||
return CmdPruningPointHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPruningPointHashMessage returns a new kaspa PruningPointHash message
|
|
||||||
func NewPruningPointHashMessage(hash *externalapi.DomainHash) *MsgPruningPointHashMessage {
|
|
||||||
return &MsgPruningPointHashMessage{
|
|
||||||
Hash: hash,
|
|
||||||
}
|
|
||||||
}
|
|
20
app/appmessage/p2p_msgpruningpointproof.go
Normal file
20
app/appmessage/p2p_msgpruningpointproof.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgPruningPointProof represents a kaspa PruningPointProof message
|
||||||
|
type MsgPruningPointProof struct {
|
||||||
|
baseMessage
|
||||||
|
|
||||||
|
Headers [][]*MsgBlockHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgPruningPointProof) Command() MessageCommand {
|
||||||
|
return CmdPruningPointProof
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgPruningPointProof returns a new MsgPruningPointProof.
|
||||||
|
func NewMsgPruningPointProof(headers [][]*MsgBlockHeader) *MsgPruningPointProof {
|
||||||
|
return &MsgPruningPointProof{
|
||||||
|
Headers: headers,
|
||||||
|
}
|
||||||
|
}
|
20
app/appmessage/p2p_msgpruningpoints.go
Normal file
20
app/appmessage/p2p_msgpruningpoints.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgPruningPoints represents a kaspa PruningPoints message
|
||||||
|
type MsgPruningPoints struct {
|
||||||
|
baseMessage
|
||||||
|
|
||||||
|
Headers []*MsgBlockHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgPruningPoints) Command() MessageCommand {
|
||||||
|
return CmdPruningPoints
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgPruningPoints returns a new MsgPruningPoints.
|
||||||
|
func NewMsgPruningPoints(headers []*MsgBlockHeader) *MsgPruningPoints {
|
||||||
|
return &MsgPruningPoints{
|
||||||
|
Headers: headers,
|
||||||
|
}
|
||||||
|
}
|
33
app/appmessage/p2p_msgrequestanticone.go
Normal file
33
app/appmessage/p2p_msgrequestanticone.go
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
// Copyright (c) 2013-2016 The btcsuite developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgRequestAnticone implements the Message interface and represents a kaspa
|
||||||
|
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
|
||||||
|
type MsgRequestAnticone struct {
|
||||||
|
baseMessage
|
||||||
|
BlockHash *externalapi.DomainHash
|
||||||
|
ContextHash *externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestAnticone) Command() MessageCommand {
|
||||||
|
return CmdRequestAnticone
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
|
||||||
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
|
// fields.
|
||||||
|
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
|
||||||
|
return &MsgRequestAnticone{
|
||||||
|
BlockHash: blockHash,
|
||||||
|
ContextHash: contextHash,
|
||||||
|
}
|
||||||
|
}
|
@ -10,7 +10,6 @@ import (
|
|||||||
// The locator is returned via a locator message (MsgBlockLocator).
|
// The locator is returned via a locator message (MsgBlockLocator).
|
||||||
type MsgRequestBlockLocator struct {
|
type MsgRequestBlockLocator struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
LowHash *externalapi.DomainHash
|
|
||||||
HighHash *externalapi.DomainHash
|
HighHash *externalapi.DomainHash
|
||||||
Limit uint32
|
Limit uint32
|
||||||
}
|
}
|
||||||
@ -24,9 +23,8 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
|
|||||||
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
|
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
|
||||||
// Message interface using the passed parameters and defaults for the remaining
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
// fields.
|
// fields.
|
||||||
func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
|
func NewMsgRequestBlockLocator(highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
|
||||||
return &MsgRequestBlockLocator{
|
return &MsgRequestBlockLocator{
|
||||||
LowHash: lowHash,
|
|
||||||
HighHash: highHash,
|
HighHash: highHash,
|
||||||
Limit: limit,
|
Limit: limit,
|
||||||
}
|
}
|
||||||
|
@ -16,7 +16,7 @@ func TestRequestBlockLocator(t *testing.T) {
|
|||||||
|
|
||||||
// Ensure the command is expected value.
|
// Ensure the command is expected value.
|
||||||
wantCmd := MessageCommand(9)
|
wantCmd := MessageCommand(9)
|
||||||
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
|
msg := NewMsgRequestBlockLocator(highHash, 0)
|
||||||
if cmd := msg.Command(); cmd != wantCmd {
|
if cmd := msg.Command(); cmd != wantCmd {
|
||||||
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
|
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
|
||||||
cmd, wantCmd)
|
cmd, wantCmd)
|
||||||
|
@ -10,7 +10,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestRequstIBDBlocks tests the MsgRequestHeaders API.
|
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
|
||||||
func TestRequstIBDBlocks(t *testing.T) {
|
func TestRequstIBDBlocks(t *testing.T) {
|
||||||
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
|
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
|
||||||
lowHash, err := externalapi.NewDomainHashFromString(hashStr)
|
lowHash, err := externalapi.NewDomainHashFromString(hashStr)
|
||||||
@ -27,14 +27,14 @@ func TestRequstIBDBlocks(t *testing.T) {
|
|||||||
// Ensure we get the same data back out.
|
// Ensure we get the same data back out.
|
||||||
msg := NewMsgRequstHeaders(lowHash, highHash)
|
msg := NewMsgRequstHeaders(lowHash, highHash)
|
||||||
if !msg.HighHash.Equal(highHash) {
|
if !msg.HighHash.Equal(highHash) {
|
||||||
t.Errorf("NewMsgRequstHeaders: wrong high hash - got %v, want %v",
|
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
|
||||||
msg.HighHash, highHash)
|
msg.HighHash, highHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure the command is expected value.
|
// Ensure the command is expected value.
|
||||||
wantCmd := MessageCommand(4)
|
wantCmd := MessageCommand(4)
|
||||||
if cmd := msg.Command(); cmd != wantCmd {
|
if cmd := msg.Command(); cmd != wantCmd {
|
||||||
t.Errorf("NewMsgRequstHeaders: wrong command - got %v want %v",
|
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
|
||||||
cmd, wantCmd)
|
cmd, wantCmd)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
31
app/appmessage/p2p_msgrequestibdchainblocklocator.go
Normal file
31
app/appmessage/p2p_msgrequestibdchainblocklocator.go
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
|
||||||
|
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
|
||||||
|
// and high hash.
|
||||||
|
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
|
||||||
|
type MsgRequestIBDChainBlockLocator struct {
|
||||||
|
baseMessage
|
||||||
|
HighHash *externalapi.DomainHash
|
||||||
|
LowHash *externalapi.DomainHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
|
||||||
|
return CmdRequestIBDChainBlockLocator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
|
||||||
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
|
// fields.
|
||||||
|
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
|
||||||
|
return &MsgRequestIBDChainBlockLocator{
|
||||||
|
HighHash: highHash,
|
||||||
|
LowHash: lowHash,
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
|
||||||
|
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
|
||||||
|
// more blocks from the pruning anticone.
|
||||||
|
//
|
||||||
|
// This message has no payload.
|
||||||
|
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
|
||||||
|
return CmdRequestNextPruningPointAndItsAnticoneBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
|
||||||
|
// Message interface.
|
||||||
|
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
|
||||||
|
return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
|
||||||
|
}
|
16
app/appmessage/p2p_msgrequestpruningpointproof.go
Normal file
16
app/appmessage/p2p_msgrequestpruningpointproof.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgRequestPruningPointProof represents a kaspa RequestPruningPointProof message
|
||||||
|
type MsgRequestPruningPointProof struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgRequestPruningPointProof) Command() MessageCommand {
|
||||||
|
return CmdRequestPruningPointProof
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgRequestPruningPointProof returns a new MsgRequestPruningPointProof.
|
||||||
|
func NewMsgRequestPruningPointProof() *MsgRequestPruningPointProof {
|
||||||
|
return &MsgRequestPruningPointProof{}
|
||||||
|
}
|
@ -4,20 +4,20 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MsgRequestPruningPointUTXOSetAndBlock represents a kaspa RequestPruningPointUTXOSetAndBlock message
|
// MsgRequestPruningPointUTXOSet represents a kaspa RequestPruningPointUTXOSet message
|
||||||
type MsgRequestPruningPointUTXOSetAndBlock struct {
|
type MsgRequestPruningPointUTXOSet struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
PruningPointHash *externalapi.DomainHash
|
PruningPointHash *externalapi.DomainHash
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
func (msg *MsgRequestPruningPointUTXOSetAndBlock) Command() MessageCommand {
|
func (msg *MsgRequestPruningPointUTXOSet) Command() MessageCommand {
|
||||||
return CmdRequestPruningPointUTXOSetAndBlock
|
return CmdRequestPruningPointUTXOSet
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMsgRequestPruningPointUTXOSetAndBlock returns a new MsgRequestPruningPointUTXOSetAndBlock
|
// NewMsgRequestPruningPointUTXOSet returns a new MsgRequestPruningPointUTXOSet
|
||||||
func NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSetAndBlock {
|
func NewMsgRequestPruningPointUTXOSet(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSet {
|
||||||
return &MsgRequestPruningPointUTXOSetAndBlock{
|
return &MsgRequestPruningPointUTXOSet{
|
||||||
PruningPointHash: pruningPointHash,
|
PruningPointHash: pruningPointHash,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
25
app/appmessage/p2p_msgtrusteddata.go
Normal file
25
app/appmessage/p2p_msgtrusteddata.go
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgTrustedData represents a kaspa TrustedData message
|
||||||
|
type MsgTrustedData struct {
|
||||||
|
baseMessage
|
||||||
|
|
||||||
|
DAAWindow []*TrustedDataDAAHeader
|
||||||
|
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *MsgTrustedData) Command() MessageCommand {
|
||||||
|
return CmdTrustedData
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgTrustedData returns a new MsgTrustedData.
|
||||||
|
func NewMsgTrustedData() *MsgTrustedData {
|
||||||
|
return &MsgTrustedData{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader
|
||||||
|
type TrustedDataDAAHeader struct {
|
||||||
|
Header *MsgBlockHeader
|
||||||
|
GHOSTDAGData *BlockGHOSTDAGData
|
||||||
|
}
|
@ -90,16 +90,18 @@ type TxIn struct {
|
|||||||
PreviousOutpoint Outpoint
|
PreviousOutpoint Outpoint
|
||||||
SignatureScript []byte
|
SignatureScript []byte
|
||||||
Sequence uint64
|
Sequence uint64
|
||||||
|
SigOpCount byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTxIn returns a new kaspa transaction input with the provided
|
// NewTxIn returns a new kaspa transaction input with the provided
|
||||||
// previous outpoint point and signature script with a default sequence of
|
// previous outpoint point and signature script with a default sequence of
|
||||||
// MaxTxInSequenceNum.
|
// MaxTxInSequenceNum.
|
||||||
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64) *TxIn {
|
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64, sigOpCount byte) *TxIn {
|
||||||
return &TxIn{
|
return &TxIn{
|
||||||
PreviousOutpoint: *prevOut,
|
PreviousOutpoint: *prevOut,
|
||||||
SignatureScript: signatureScript,
|
SignatureScript: signatureScript,
|
||||||
Sequence: sequence,
|
Sequence: sequence,
|
||||||
|
SigOpCount: sigOpCount,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -206,6 +208,7 @@ func (msg *MsgTx) Copy() *MsgTx {
|
|||||||
PreviousOutpoint: newOutpoint,
|
PreviousOutpoint: newOutpoint,
|
||||||
SignatureScript: newScript,
|
SignatureScript: newScript,
|
||||||
Sequence: oldTxIn.Sequence,
|
Sequence: oldTxIn.Sequence,
|
||||||
|
SigOpCount: oldTxIn.SigOpCount,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally, append this fully copied txin.
|
// Finally, append this fully copied txin.
|
||||||
|
@ -22,7 +22,7 @@ import (
|
|||||||
|
|
||||||
// TestTx tests the MsgTx API.
|
// TestTx tests the MsgTx API.
|
||||||
func TestTx(t *testing.T) {
|
func TestTx(t *testing.T) {
|
||||||
pver := ProtocolVersion
|
pver := uint32(4)
|
||||||
|
|
||||||
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
|
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
|
||||||
txID, err := transactionid.FromString(txIDStr)
|
txID, err := transactionid.FromString(txIDStr)
|
||||||
@ -68,7 +68,7 @@ func TestTx(t *testing.T) {
|
|||||||
|
|
||||||
// Ensure we get the same transaction input back out.
|
// Ensure we get the same transaction input back out.
|
||||||
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
|
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
|
||||||
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
|
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum, 1)
|
||||||
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
|
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
|
||||||
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
|
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
|
||||||
spew.Sprint(&txIn.PreviousOutpoint),
|
spew.Sprint(&txIn.PreviousOutpoint),
|
||||||
@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
|
|||||||
|
|
||||||
// TestTxHash tests the ability to generate the hash of a transaction accurately.
|
// TestTxHash tests the ability to generate the hash of a transaction accurately.
|
||||||
func TestTxHashAndID(t *testing.T) {
|
func TestTxHashAndID(t *testing.T) {
|
||||||
txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
|
txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
|
||||||
txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
|
txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
|
||||||
wantTxID1, err := transactionid.FromString(txID1Str)
|
wantTxID1, err := transactionid.FromString(txID1Str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("NewTxIDFromStr: %v", err)
|
t.Fatalf("NewTxIDFromStr: %v", err)
|
||||||
@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
|
|||||||
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
|
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
|
||||||
}
|
}
|
||||||
|
|
||||||
hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
|
hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
|
||||||
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
|
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("NewTxIDFromStr: %v", err)
|
t.Errorf("NewTxIDFromStr: %v", err)
|
||||||
|
@ -82,12 +82,12 @@ func (msg *MsgVersion) Command() MessageCommand {
|
|||||||
// Message interface using the passed parameters and defaults for the remaining
|
// Message interface using the passed parameters and defaults for the remaining
|
||||||
// fields.
|
// fields.
|
||||||
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
|
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
|
||||||
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
|
subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion {
|
||||||
|
|
||||||
// Limit the timestamp to one millisecond precision since the protocol
|
// Limit the timestamp to one millisecond precision since the protocol
|
||||||
// doesn't support better.
|
// doesn't support better.
|
||||||
return &MsgVersion{
|
return &MsgVersion{
|
||||||
ProtocolVersion: ProtocolVersion,
|
ProtocolVersion: protocolVersion,
|
||||||
Network: network,
|
Network: network,
|
||||||
Services: 0,
|
Services: 0,
|
||||||
Timestamp: mstime.Now(),
|
Timestamp: mstime.Now(),
|
||||||
|
@ -15,7 +15,7 @@ import (
|
|||||||
|
|
||||||
// TestVersion tests the MsgVersion API.
|
// TestVersion tests the MsgVersion API.
|
||||||
func TestVersion(t *testing.T) {
|
func TestVersion(t *testing.T) {
|
||||||
pver := ProtocolVersion
|
pver := uint32(4)
|
||||||
|
|
||||||
// Create version message data.
|
// Create version message data.
|
||||||
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
|
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
|
||||||
@ -26,7 +26,7 @@ func TestVersion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Ensure we get the correct data back out.
|
// Ensure we get the correct data back out.
|
||||||
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
|
msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4)
|
||||||
if msg.ProtocolVersion != pver {
|
if msg.ProtocolVersion != pver {
|
||||||
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
|
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
|
||||||
msg.ProtocolVersion, pver)
|
msg.ProtocolVersion, pver)
|
||||||
|
@ -5,8 +5,9 @@
|
|||||||
package appmessage
|
package appmessage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/util/mstime"
|
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/util/mstime"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NetAddress defines information about a peer on the network including the time
|
// NetAddress defines information about a peer on the network including the time
|
||||||
@ -57,3 +58,7 @@ func NewNetAddressTimestamp(
|
|||||||
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
|
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
|
||||||
return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
|
return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (na NetAddress) String() string {
|
||||||
|
return na.TCPAddress().String()
|
||||||
|
}
|
||||||
|
22
app/appmessage/p2p_ready.go
Normal file
22
app/appmessage/p2p_ready.go
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MsgReady implements the Message interface and represents a kaspa
|
||||||
|
// Ready message. It is used to notify that the peer is ready to receive
|
||||||
|
// messages.
|
||||||
|
//
|
||||||
|
// This message has no payload.
|
||||||
|
type MsgReady struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message. This is part
|
||||||
|
// of the Message interface implementation.
|
||||||
|
func (msg *MsgReady) Command() MessageCommand {
|
||||||
|
return CmdReady
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsgReady returns a new kaspa Ready message that conforms to the
|
||||||
|
// Message interface.
|
||||||
|
func NewMsgReady() *MsgReady {
|
||||||
|
return &MsgReady{}
|
||||||
|
}
|
@ -1,16 +0,0 @@
|
|||||||
package appmessage
|
|
||||||
|
|
||||||
// MsgRequestPruningPointHashMessage represents a kaspa RequestPruningPointHashMessage message
|
|
||||||
type MsgRequestPruningPointHashMessage struct {
|
|
||||||
baseMessage
|
|
||||||
}
|
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
|
||||||
func (msg *MsgRequestPruningPointHashMessage) Command() MessageCommand {
|
|
||||||
return CmdRequestPruningPointHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMsgRequestPruningPointHashMessage returns a new kaspa RequestPruningPointHash message
|
|
||||||
func NewMsgRequestPruningPointHashMessage() *MsgRequestPruningPointHashMessage {
|
|
||||||
return &MsgRequestPruningPointHashMessage{}
|
|
||||||
}
|
|
@ -11,9 +11,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// ProtocolVersion is the latest protocol version this package supports.
|
|
||||||
ProtocolVersion uint32 = 1
|
|
||||||
|
|
||||||
// DefaultServices describes the default services that are supported by
|
// DefaultServices describes the default services that are supported by
|
||||||
// the server.
|
// the server.
|
||||||
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
|
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
|
||||||
|
43
app/appmessage/rpc_estimate_network_hashes_per_second.go
Normal file
43
app/appmessage/rpc_estimate_network_hashes_per_second.go
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// EstimateNetworkHashesPerSecondRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type EstimateNetworkHashesPerSecondRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
StartHash string
|
||||||
|
WindowSize uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *EstimateNetworkHashesPerSecondRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdEstimateNetworkHashesPerSecondRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEstimateNetworkHashesPerSecondRequestMessage returns a instance of the message
|
||||||
|
func NewEstimateNetworkHashesPerSecondRequestMessage(startHash string, windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage {
|
||||||
|
return &EstimateNetworkHashesPerSecondRequestMessage{
|
||||||
|
StartHash: startHash,
|
||||||
|
WindowSize: windowSize,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EstimateNetworkHashesPerSecondResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type EstimateNetworkHashesPerSecondResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
NetworkHashesPerSecond uint64
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *EstimateNetworkHashesPerSecondResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdEstimateNetworkHashesPerSecondResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEstimateNetworkHashesPerSecondResponseMessage returns a instance of the message
|
||||||
|
func NewEstimateNetworkHashesPerSecondResponseMessage(networkHashesPerSecond uint64) *EstimateNetworkHashesPerSecondResponseMessage {
|
||||||
|
return &EstimateNetworkHashesPerSecondResponseMessage{
|
||||||
|
NetworkHashesPerSecond: networkHashesPerSecond,
|
||||||
|
}
|
||||||
|
}
|
47
app/appmessage/rpc_fee_estimate.go
Normal file
47
app/appmessage/rpc_fee_estimate.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetFeeEstimateRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetFeeEstimateRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetFeeEstimateRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetFeeEstimateRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetFeeEstimateRequestMessage returns a instance of the message
|
||||||
|
func NewGetFeeEstimateRequestMessage() *GetFeeEstimateRequestMessage {
|
||||||
|
return &GetFeeEstimateRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type RPCFeeRateBucket struct {
|
||||||
|
Feerate float64
|
||||||
|
EstimatedSeconds float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type RPCFeeEstimate struct {
|
||||||
|
PriorityBucket RPCFeeRateBucket
|
||||||
|
NormalBuckets []RPCFeeRateBucket
|
||||||
|
LowBuckets []RPCFeeRateBucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCoinSupplyResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetFeeEstimateResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Estimate RPCFeeEstimate
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetFeeEstimateResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetFeeEstimateResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetFeeEstimateResponseMessage returns a instance of the message
|
||||||
|
func NewGetFeeEstimateResponseMessage() *GetFeeEstimateResponseMessage {
|
||||||
|
return &GetFeeEstimateResponseMessage{}
|
||||||
|
}
|
41
app/appmessage/rpc_get_balance_by_address.go
Normal file
41
app/appmessage/rpc_get_balance_by_address.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetBalanceByAddressRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalanceByAddressRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Address string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalanceByAddressRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalanceByAddressRequest returns a instance of the message
|
||||||
|
func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage {
|
||||||
|
return &GetBalanceByAddressRequestMessage{
|
||||||
|
Address: address,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBalanceByAddressResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalanceByAddressResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Balance uint64
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalanceByAddressResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalanceByAddressResponse returns an instance of the message
|
||||||
|
func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage {
|
||||||
|
return &GetBalanceByAddressResponseMessage{
|
||||||
|
Balance: Balance,
|
||||||
|
}
|
||||||
|
}
|
47
app/appmessage/rpc_get_balances_by_addresses.go
Normal file
47
app/appmessage/rpc_get_balances_by_addresses.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalancesByAddressesRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Addresses []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalancesByAddressesRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalancesByAddressesRequest returns a instance of the message
|
||||||
|
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
|
||||||
|
return &GetBalancesByAddressesRequestMessage{
|
||||||
|
Addresses: addresses,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BalancesByAddressesEntry represents the balance of some address
|
||||||
|
type BalancesByAddressesEntry struct {
|
||||||
|
Address string
|
||||||
|
Balance uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetBalancesByAddressesResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Entries []*BalancesByAddressesEntry
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetBalancesByAddressesResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetBalancesByAddressesResponse returns an instance of the message
|
||||||
|
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
|
||||||
|
return &GetBalancesByAddressesResponseMessage{
|
||||||
|
Entries: entries,
|
||||||
|
}
|
||||||
|
}
|
@ -4,8 +4,8 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetBlockRequestMessage struct {
|
type GetBlockRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
Hash string
|
Hash string
|
||||||
IncludeTransactionVerboseData bool
|
IncludeTransactions bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -14,10 +14,10 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetBlockRequestMessage returns a instance of the message
|
// NewGetBlockRequestMessage returns a instance of the message
|
||||||
func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
|
func NewGetBlockRequestMessage(hash string, includeTransactions bool) *GetBlockRequestMessage {
|
||||||
return &GetBlockRequestMessage{
|
return &GetBlockRequestMessage{
|
||||||
Hash: hash,
|
Hash: hash,
|
||||||
IncludeTransactionVerboseData: includeTransactionVerboseData,
|
IncludeTransactions: includeTransactions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5,6 +5,7 @@ package appmessage
|
|||||||
type GetBlockTemplateRequestMessage struct {
|
type GetBlockTemplateRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
PayAddress string
|
PayAddress string
|
||||||
|
ExtraData string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetBlockTemplateRequestMessage returns a instance of the message
|
// NewGetBlockTemplateRequestMessage returns a instance of the message
|
||||||
func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
|
func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
|
||||||
return &GetBlockTemplateRequestMessage{
|
return &GetBlockTemplateRequestMessage{
|
||||||
PayAddress: payAddress,
|
PayAddress: payAddress,
|
||||||
|
ExtraData: extraData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,9 +4,9 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetBlocksRequestMessage struct {
|
type GetBlocksRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
LowHash string
|
LowHash string
|
||||||
IncludeBlocks bool
|
IncludeBlocks bool
|
||||||
IncludeTransactionVerboseData bool
|
IncludeTransactions bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -16,11 +16,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
|
|||||||
|
|
||||||
// NewGetBlocksRequestMessage returns a instance of the message
|
// NewGetBlocksRequestMessage returns a instance of the message
|
||||||
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
|
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
|
||||||
includeTransactionVerboseData bool) *GetBlocksRequestMessage {
|
includeTransactions bool) *GetBlocksRequestMessage {
|
||||||
return &GetBlocksRequestMessage{
|
return &GetBlocksRequestMessage{
|
||||||
LowHash: lowHash,
|
LowHash: lowHash,
|
||||||
IncludeBlocks: includeBlocks,
|
IncludeBlocks: includeBlocks,
|
||||||
IncludeTransactionVerboseData: includeTransactionVerboseData,
|
IncludeTransactions: includeTransactions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
40
app/appmessage/rpc_get_coin_supply.go
Normal file
40
app/appmessage/rpc_get_coin_supply.go
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// GetCoinSupplyRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetCoinSupplyRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetCoinSupplyRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetCoinSupplyRequestMessage returns a instance of the message
|
||||||
|
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
|
||||||
|
return &GetCoinSupplyRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCoinSupplyResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetCoinSupplyResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
MaxSompi uint64
|
||||||
|
CirculatingSompi uint64
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetCoinSupplyResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetCoinSupplyResponseMessage returns a instance of the message
|
||||||
|
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
|
||||||
|
return &GetCoinSupplyResponseMessage{
|
||||||
|
MaxSompi: maxSompi,
|
||||||
|
CirculatingSompi: circulatingSompi,
|
||||||
|
}
|
||||||
|
}
|
@ -20,8 +20,11 @@ func NewGetInfoRequestMessage() *GetInfoRequestMessage {
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetInfoResponseMessage struct {
|
type GetInfoResponseMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
P2PID string
|
P2PID string
|
||||||
MempoolSize uint64
|
MempoolSize uint64
|
||||||
|
ServerVersion string
|
||||||
|
IsUtxoIndexed bool
|
||||||
|
IsSynced bool
|
||||||
|
|
||||||
Error *RPCError
|
Error *RPCError
|
||||||
}
|
}
|
||||||
@ -32,9 +35,12 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetInfoResponseMessage returns a instance of the message
|
// NewGetInfoResponseMessage returns a instance of the message
|
||||||
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64) *GetInfoResponseMessage {
|
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
|
||||||
return &GetInfoResponseMessage{
|
return &GetInfoResponseMessage{
|
||||||
P2PID: p2pID,
|
P2PID: p2pID,
|
||||||
MempoolSize: mempoolSize,
|
MempoolSize: mempoolSize,
|
||||||
|
ServerVersion: serverVersion,
|
||||||
|
IsUtxoIndexed: isUtxoIndexed,
|
||||||
|
IsSynced: isSynced,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,6 +4,8 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetMempoolEntriesRequestMessage struct {
|
type GetMempoolEntriesRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntriesRequestMessage returns a instance of the message
|
// NewGetMempoolEntriesRequestMessage returns a instance of the message
|
||||||
func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
|
func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
|
||||||
return &GetMempoolEntriesRequestMessage{}
|
return &GetMempoolEntriesRequestMessage{
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
|
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
|
||||||
|
52
app/appmessage/rpc_get_mempool_entries_by_addresses.go
Normal file
52
app/appmessage/rpc_get_mempool_entries_by_addresses.go
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// MempoolEntryByAddress represents MempoolEntries associated with some address
|
||||||
|
type MempoolEntryByAddress struct {
|
||||||
|
Address string
|
||||||
|
Receiving []*MempoolEntry
|
||||||
|
Sending []*MempoolEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetMempoolEntriesByAddressesRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Addresses []string
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdGetMempoolEntriesByAddressesRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
|
||||||
|
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
|
||||||
|
return &GetMempoolEntriesByAddressesRequestMessage{
|
||||||
|
Addresses: addresses,
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type GetMempoolEntriesByAddressesResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Entries []*MempoolEntryByAddress
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdGetMempoolEntriesByAddressesResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
|
||||||
|
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
|
||||||
|
return &GetMempoolEntriesByAddressesResponseMessage{
|
||||||
|
Entries: entries,
|
||||||
|
}
|
||||||
|
}
|
@ -4,7 +4,9 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetMempoolEntryRequestMessage struct {
|
type GetMempoolEntryRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
TxID string
|
TxID string
|
||||||
|
IncludeOrphanPool bool
|
||||||
|
FilterTransactionPool bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntryRequestMessage returns a instance of the message
|
// NewGetMempoolEntryRequestMessage returns a instance of the message
|
||||||
func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
|
func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
|
||||||
return &GetMempoolEntryRequestMessage{TxID: txID}
|
return &GetMempoolEntryRequestMessage{
|
||||||
|
TxID: txID,
|
||||||
|
IncludeOrphanPool: includeOrphanPool,
|
||||||
|
FilterTransactionPool: filterTransactionPool,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMempoolEntryResponseMessage is an appmessage corresponding to
|
// GetMempoolEntryResponseMessage is an appmessage corresponding to
|
||||||
@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
|
|||||||
type MempoolEntry struct {
|
type MempoolEntry struct {
|
||||||
Fee uint64
|
Fee uint64
|
||||||
Transaction *RPCTransaction
|
Transaction *RPCTransaction
|
||||||
|
IsOrphan bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
||||||
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
|
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
|
||||||
return &GetMempoolEntryResponseMessage{
|
return &GetMempoolEntryResponseMessage{
|
||||||
Entry: &MempoolEntry{
|
Entry: &MempoolEntry{
|
||||||
Fee: fee,
|
Fee: fee,
|
||||||
Transaction: transaction,
|
Transaction: transaction,
|
||||||
|
IsOrphan: isOrphan,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,8 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
|
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
StartHash string
|
StartHash string
|
||||||
|
IncludeAcceptedTransactionIDs bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,18 +14,29 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
|
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
|
||||||
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
|
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
|
||||||
|
startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
|
||||||
|
|
||||||
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
|
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
|
||||||
StartHash: startHash,
|
StartHash: startHash,
|
||||||
|
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
|
||||||
|
// VirtualSelectedParentChainChangedNotificationMessage appmessages
|
||||||
|
type AcceptedTransactionIDs struct {
|
||||||
|
AcceptingBlockHash string
|
||||||
|
AcceptedTransactionIDs []string
|
||||||
|
}
|
||||||
|
|
||||||
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
|
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
|
||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
|
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
RemovedChainBlockHashes []string
|
RemovedChainBlockHashes []string
|
||||||
AddedChainBlocks []*ChainBlock
|
AddedChainBlockHashes []string
|
||||||
|
AcceptedTransactionIDs []*AcceptedTransactionIDs
|
||||||
|
|
||||||
Error *RPCError
|
Error *RPCError
|
||||||
}
|
}
|
||||||
@ -35,11 +47,12 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
|
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
|
||||||
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes []string,
|
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
|
||||||
addedChainBlocks []*ChainBlock) *GetVirtualSelectedParentChainFromBlockResponseMessage {
|
addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {
|
||||||
|
|
||||||
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
|
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
|
||||||
RemovedChainBlockHashes: removedChainBlockHashes,
|
RemovedChainBlockHashes: removedChainBlockHashes,
|
||||||
AddedChainBlocks: addedChainBlocks,
|
AddedChainBlockHashes: addedChainBlockHashes,
|
||||||
|
AcceptedTransactionIDs: acceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
50
app/appmessage/rpc_notify_new_block_template.go
Normal file
50
app/appmessage/rpc_notify_new_block_template.go
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NotifyNewBlockTemplateRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdNotifyNewBlockTemplateRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
|
||||||
|
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
|
||||||
|
return &NotifyNewBlockTemplateRequestMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NotifyNewBlockTemplateResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdNotifyNewBlockTemplateResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
|
||||||
|
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
|
||||||
|
return &NotifyNewBlockTemplateResponseMessage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlockTemplateNotificationMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type NewBlockTemplateNotificationMessage struct {
|
||||||
|
baseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
|
||||||
|
return CmdNewBlockTemplateNotificationMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNewBlockTemplateNotificationMessage returns an instance of the message
|
||||||
|
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
|
||||||
|
return &NewBlockTemplateNotificationMessage{}
|
||||||
|
}
|
@ -4,6 +4,7 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
|
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
|
IncludeAcceptedTransactionIDs bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -11,9 +12,13 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
|
|||||||
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
|
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
|
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
|
||||||
func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
|
func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
|
||||||
return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
|
includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
|
||||||
|
|
||||||
|
return &NotifyVirtualSelectedParentChainChangedRequestMessage{
|
||||||
|
IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
|
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
|
||||||
@ -38,19 +43,8 @@ func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualS
|
|||||||
type VirtualSelectedParentChainChangedNotificationMessage struct {
|
type VirtualSelectedParentChainChangedNotificationMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
RemovedChainBlockHashes []string
|
RemovedChainBlockHashes []string
|
||||||
AddedChainBlocks []*ChainBlock
|
AddedChainBlockHashes []string
|
||||||
}
|
AcceptedTransactionIDs []*AcceptedTransactionIDs
|
||||||
|
|
||||||
// ChainBlock represents a DAG chain-block
|
|
||||||
type ChainBlock struct {
|
|
||||||
Hash string
|
|
||||||
AcceptedBlocks []*AcceptedBlock
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcceptedBlock represents a block accepted into the DAG
|
|
||||||
type AcceptedBlock struct {
|
|
||||||
Hash string
|
|
||||||
AcceptedTransactionIDs []string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -59,11 +53,12 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
|
// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
|
||||||
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes []string,
|
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
|
||||||
addedChainBlocks []*ChainBlock) *VirtualSelectedParentChainChangedNotificationMessage {
|
addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {
|
||||||
|
|
||||||
return &VirtualSelectedParentChainChangedNotificationMessage{
|
return &VirtualSelectedParentChainChangedNotificationMessage{
|
||||||
RemovedChainBlockHashes: removedChainBlockHashes,
|
RemovedChainBlockHashes: removedChainBlockHashes,
|
||||||
AddedChainBlocks: addedChainBlocks,
|
AddedChainBlockHashes: addedChainBlocks,
|
||||||
|
AcceptedTransactionIDs: acceptedTransactionIDs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,8 @@ package appmessage
|
|||||||
// its respective RPC message
|
// its respective RPC message
|
||||||
type SubmitBlockRequestMessage struct {
|
type SubmitBlockRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
Block *RPCBlock
|
Block *RPCBlock
|
||||||
|
AllowNonDAABlocks bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,9 +14,10 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSubmitBlockRequestMessage returns a instance of the message
|
// NewSubmitBlockRequestMessage returns a instance of the message
|
||||||
func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
|
func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage {
|
||||||
return &SubmitBlockRequestMessage{
|
return &SubmitBlockRequestMessage{
|
||||||
Block: block,
|
Block: block,
|
||||||
|
AllowNonDAABlocks: allowNonDAABlocks,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -53,7 +55,7 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
|
|||||||
return CmdSubmitBlockResponseMessage
|
return CmdSubmitBlockResponseMessage
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSubmitBlockResponseMessage returns a instance of the message
|
// NewSubmitBlockResponseMessage returns an instance of the message
|
||||||
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
|
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
|
||||||
return &SubmitBlockResponseMessage{}
|
return &SubmitBlockResponseMessage{}
|
||||||
}
|
}
|
||||||
@ -70,22 +72,34 @@ type RPCBlock struct {
|
|||||||
// used over RPC
|
// used over RPC
|
||||||
type RPCBlockHeader struct {
|
type RPCBlockHeader struct {
|
||||||
Version uint32
|
Version uint32
|
||||||
ParentHashes []string
|
Parents []*RPCBlockLevelParents
|
||||||
HashMerkleRoot string
|
HashMerkleRoot string
|
||||||
AcceptedIDMerkleRoot string
|
AcceptedIDMerkleRoot string
|
||||||
UTXOCommitment string
|
UTXOCommitment string
|
||||||
Timestamp int64
|
Timestamp int64
|
||||||
Bits uint32
|
Bits uint32
|
||||||
Nonce uint64
|
Nonce uint64
|
||||||
|
DAAScore uint64
|
||||||
|
BlueScore uint64
|
||||||
|
BlueWork string
|
||||||
|
PruningPoint string
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPCBlockLevelParents holds parent hashes for one block level
|
||||||
|
type RPCBlockLevelParents struct {
|
||||||
|
ParentHashes []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// RPCBlockVerboseData holds verbose data about a block
|
// RPCBlockVerboseData holds verbose data about a block
|
||||||
type RPCBlockVerboseData struct {
|
type RPCBlockVerboseData struct {
|
||||||
Hash string
|
Hash string
|
||||||
Difficulty float64
|
Difficulty float64
|
||||||
SelectedParentHash string
|
SelectedParentHash string
|
||||||
TransactionIDs []string
|
TransactionIDs []string
|
||||||
IsHeaderOnly bool
|
IsHeaderOnly bool
|
||||||
BlueScore uint64
|
BlueScore uint64
|
||||||
ChildrenHashes []string
|
ChildrenHashes []string
|
||||||
|
MergeSetBluesHashes []string
|
||||||
|
MergeSetRedsHashes []string
|
||||||
|
IsChainBlock bool
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ package appmessage
|
|||||||
type SubmitTransactionRequestMessage struct {
|
type SubmitTransactionRequestMessage struct {
|
||||||
baseMessage
|
baseMessage
|
||||||
Transaction *RPCTransaction
|
Transaction *RPCTransaction
|
||||||
|
AllowOrphan bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns the protocol command string for the message
|
// Command returns the protocol command string for the message
|
||||||
@ -13,9 +14,10 @@ func (msg *SubmitTransactionRequestMessage) Command() MessageCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSubmitTransactionRequestMessage returns a instance of the message
|
// NewSubmitTransactionRequestMessage returns a instance of the message
|
||||||
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTransactionRequestMessage {
|
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction, allowOrphan bool) *SubmitTransactionRequestMessage {
|
||||||
return &SubmitTransactionRequestMessage{
|
return &SubmitTransactionRequestMessage{
|
||||||
Transaction: transaction,
|
Transaction: transaction,
|
||||||
|
AllowOrphan: allowOrphan,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,6 +52,7 @@ type RPCTransaction struct {
|
|||||||
SubnetworkID string
|
SubnetworkID string
|
||||||
Gas uint64
|
Gas uint64
|
||||||
Payload string
|
Payload string
|
||||||
|
Mass uint64
|
||||||
VerboseData *RPCTransactionVerboseData
|
VerboseData *RPCTransactionVerboseData
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -59,6 +62,7 @@ type RPCTransactionInput struct {
|
|||||||
PreviousOutpoint *RPCOutpoint
|
PreviousOutpoint *RPCOutpoint
|
||||||
SignatureScript string
|
SignatureScript string
|
||||||
Sequence uint64
|
Sequence uint64
|
||||||
|
SigOpCount byte
|
||||||
VerboseData *RPCTransactionInputVerboseData
|
VerboseData *RPCTransactionInputVerboseData
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -96,7 +100,7 @@ type RPCUTXOEntry struct {
|
|||||||
type RPCTransactionVerboseData struct {
|
type RPCTransactionVerboseData struct {
|
||||||
TransactionID string
|
TransactionID string
|
||||||
Hash string
|
Hash string
|
||||||
Size uint64
|
Mass uint64
|
||||||
BlockHash string
|
BlockHash string
|
||||||
BlockTime uint64
|
BlockTime uint64
|
||||||
}
|
}
|
||||||
|
42
app/appmessage/rpc_submit_transaction_replacement.go
Normal file
42
app/appmessage/rpc_submit_transaction_replacement.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package appmessage
|
||||||
|
|
||||||
|
// SubmitTransactionReplacementRequestMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type SubmitTransactionReplacementRequestMessage struct {
|
||||||
|
baseMessage
|
||||||
|
Transaction *RPCTransaction
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *SubmitTransactionReplacementRequestMessage) Command() MessageCommand {
|
||||||
|
return CmdSubmitTransactionReplacementRequestMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubmitTransactionReplacementRequestMessage returns a instance of the message
|
||||||
|
func NewSubmitTransactionReplacementRequestMessage(transaction *RPCTransaction) *SubmitTransactionReplacementRequestMessage {
|
||||||
|
return &SubmitTransactionReplacementRequestMessage{
|
||||||
|
Transaction: transaction,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubmitTransactionReplacementResponseMessage is an appmessage corresponding to
|
||||||
|
// its respective RPC message
|
||||||
|
type SubmitTransactionReplacementResponseMessage struct {
|
||||||
|
baseMessage
|
||||||
|
TransactionID string
|
||||||
|
ReplacedTransaction *RPCTransaction
|
||||||
|
|
||||||
|
Error *RPCError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command returns the protocol command string for the message
|
||||||
|
func (msg *SubmitTransactionReplacementResponseMessage) Command() MessageCommand {
|
||||||
|
return CmdSubmitTransactionReplacementResponseMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubmitTransactionReplacementResponseMessage returns a instance of the message
|
||||||
|
func NewSubmitTransactionReplacementResponseMessage(transactionID string) *SubmitTransactionReplacementResponseMessage {
|
||||||
|
return &SubmitTransactionReplacementResponseMessage{
|
||||||
|
TransactionID: transactionID,
|
||||||
|
}
|
||||||
|
}
|
@ -4,7 +4,10 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol"
|
"github.com/kaspanet/kaspad/app/protocol"
|
||||||
"github.com/kaspanet/kaspad/app/rpc"
|
"github.com/kaspanet/kaspad/app/rpc"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
@ -14,7 +17,6 @@ import (
|
|||||||
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
|
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
||||||
"github.com/kaspanet/kaspad/util/panics"
|
"github.com/kaspanet/kaspad/util/panics"
|
||||||
@ -46,8 +48,6 @@ func (a *ComponentManager) Start() {
|
|||||||
panics.Exit(log, fmt.Sprintf("Error starting the net adapter: %+v", err))
|
panics.Exit(log, fmt.Sprintf("Error starting the net adapter: %+v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
a.maybeSeedFromDNS()
|
|
||||||
|
|
||||||
a.connectionManager.Start()
|
a.connectionManager.Start()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -69,6 +69,7 @@ func (a *ComponentManager) Stop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
a.protocolManager.Close()
|
a.protocolManager.Close()
|
||||||
|
close(a.protocolManager.Context().Domain().ConsensusEventsChannel())
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -83,8 +84,11 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
|
|||||||
IsArchival: cfg.IsArchivalNode,
|
IsArchival: cfg.IsArchivalNode,
|
||||||
EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet,
|
EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet,
|
||||||
}
|
}
|
||||||
|
mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params)
|
||||||
|
mempoolConfig.MaximumOrphanTransactionCount = cfg.MaxOrphanTxs
|
||||||
|
mempoolConfig.MinimumRelayTransactionFee = cfg.MinRelayTxFee
|
||||||
|
|
||||||
domain, err := domain.New(&consensusConfig, db)
|
domain, err := domain.New(&consensusConfig, mempoolConfig, db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -101,7 +105,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
|
|||||||
|
|
||||||
var utxoIndex *utxoindex.UTXOIndex
|
var utxoIndex *utxoindex.UTXOIndex
|
||||||
if cfg.UTXOIndex {
|
if cfg.UTXOIndex {
|
||||||
utxoIndex, err = utxoindex.New(domain.Consensus(), db)
|
utxoIndex, err = utxoindex.New(domain, db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -117,7 +121,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
|
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)
|
||||||
|
|
||||||
return &ComponentManager{
|
return &ComponentManager{
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
@ -138,6 +142,7 @@ func setupRPC(
|
|||||||
connectionManager *connmanager.ConnectionManager,
|
connectionManager *connmanager.ConnectionManager,
|
||||||
addressManager *addressmanager.AddressManager,
|
addressManager *addressmanager.AddressManager,
|
||||||
utxoIndex *utxoindex.UTXOIndex,
|
utxoIndex *utxoindex.UTXOIndex,
|
||||||
|
consensusEventsChan chan externalapi.ConsensusEvent,
|
||||||
shutDownChan chan<- struct{},
|
shutDownChan chan<- struct{},
|
||||||
) *rpc.Manager {
|
) *rpc.Manager {
|
||||||
|
|
||||||
@ -149,31 +154,15 @@ func setupRPC(
|
|||||||
connectionManager,
|
connectionManager,
|
||||||
addressManager,
|
addressManager,
|
||||||
utxoIndex,
|
utxoIndex,
|
||||||
|
consensusEventsChan,
|
||||||
shutDownChan,
|
shutDownChan,
|
||||||
)
|
)
|
||||||
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
|
protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
|
||||||
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
|
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
|
||||||
|
|
||||||
return rpcManager
|
return rpcManager
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *ComponentManager) maybeSeedFromDNS() {
|
|
||||||
if !a.cfg.DisableDNSSeed {
|
|
||||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, false, nil,
|
|
||||||
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
|
|
||||||
// Kaspad uses a lookup of the dns seeder here. Since seeder returns
|
|
||||||
// IPs of nodes and not its own IP, we can not know real IP of
|
|
||||||
// source. So we'll take first returned address as source.
|
|
||||||
a.addressManager.AddAddresses(addresses...)
|
|
||||||
})
|
|
||||||
|
|
||||||
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, false, nil,
|
|
||||||
func(addresses []*appmessage.NetAddress) {
|
|
||||||
a.addressManager.AddAddresses(addresses...)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// P2PNodeID returns the network ID associated with this ComponentManager
|
// P2PNodeID returns the network ID associated with this ComponentManager
|
||||||
func (a *ComponentManager) P2PNodeID() *id.ID {
|
func (a *ComponentManager) P2PNodeID() *id.ID {
|
||||||
return a.netAdapter.ID()
|
return a.netAdapter.ID()
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -8,7 +10,18 @@ import (
|
|||||||
|
|
||||||
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
|
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
|
||||||
// to/from routes.
|
// to/from routes.
|
||||||
const DefaultTimeout = 30 * time.Second
|
const DefaultTimeout = 120 * time.Second
|
||||||
|
|
||||||
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
|
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
|
||||||
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
|
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
|
||||||
|
|
||||||
|
type flowExecuteFunc func(peer *peerpkg.Peer)
|
||||||
|
|
||||||
|
// Flow is a a data structure that is used in order to associate a p2p flow to some route in a router.
|
||||||
|
type Flow struct {
|
||||||
|
Name string
|
||||||
|
ExecuteFunc flowExecuteFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlowInitializeFunc is a function that is used in order to initialize a flow
|
||||||
|
type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error
|
||||||
|
@ -1,64 +1,62 @@
|
|||||||
package flowcontext
|
package flowcontext
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// OnNewBlock updates the mempool after a new block arrival, and
|
// OnNewBlock updates the mempool after a new block arrival, and
|
||||||
// relays newly unorphaned transactions and possibly rebroadcast
|
// relays newly unorphaned transactions and possibly rebroadcast
|
||||||
// manually added transactions when not in IBD.
|
// manually added transactions when not in IBD.
|
||||||
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
|
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
|
||||||
blockInsertionResult *externalapi.BlockInsertionResult) error {
|
|
||||||
|
|
||||||
hash := consensushashing.BlockHash(block)
|
hash := consensushashing.BlockHash(block)
|
||||||
log.Debugf("OnNewBlock start for block %s", hash)
|
log.Tracef("OnNewBlock start for block %s", hash)
|
||||||
defer log.Debugf("OnNewBlock end for block %s", hash)
|
defer log.Tracef("OnNewBlock end for block %s", hash)
|
||||||
|
|
||||||
unorphaningResults, err := f.UnorphanBlocks(block)
|
unorphanedBlocks, err := f.UnorphanBlocks(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
|
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
|
||||||
|
|
||||||
newBlocks := []*externalapi.DomainBlock{block}
|
newBlocks := []*externalapi.DomainBlock{block}
|
||||||
newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
|
newBlocks = append(newBlocks, unorphanedBlocks...)
|
||||||
for _, unorphaningResult := range unorphaningResults {
|
|
||||||
newBlocks = append(newBlocks, unorphaningResult.block)
|
|
||||||
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
|
|
||||||
}
|
|
||||||
|
|
||||||
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
|
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
|
||||||
for i, newBlock := range newBlocks {
|
for _, newBlock := range newBlocks {
|
||||||
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
|
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
|
||||||
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
|
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
|
||||||
|
|
||||||
if f.onBlockAddedToDAGHandler != nil {
|
|
||||||
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
|
|
||||||
blockInsertionResult = newBlockInsertionResults[i]
|
|
||||||
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
|
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
|
||||||
|
func (f *FlowContext) OnNewBlockTemplate() error {
|
||||||
|
// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
|
||||||
|
// state consistent without dependency on external event registration
|
||||||
|
f.Domain().MiningManager().ClearBlockTemplate()
|
||||||
|
if f.onNewBlockTemplateHandler != nil {
|
||||||
|
return f.onNewBlockTemplateHandler()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
|
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
|
||||||
// resets due to pruning point change via IBD.
|
// resets due to pruning point change via IBD.
|
||||||
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
|
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
|
||||||
@ -71,8 +69,6 @@ func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
|
|||||||
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
||||||
addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
|
addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
|
||||||
|
|
||||||
f.updateTransactionsToRebroadcast(addedBlocks)
|
|
||||||
|
|
||||||
// Don't relay transactions when in IBD.
|
// Don't relay transactions when in IBD.
|
||||||
if f.IsIBDRunning() {
|
if f.IsIBDRunning() {
|
||||||
return nil
|
return nil
|
||||||
@ -80,7 +76,12 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
|||||||
|
|
||||||
var txIDsToRebroadcast []*externalapi.DomainTransactionID
|
var txIDsToRebroadcast []*externalapi.DomainTransactionID
|
||||||
if f.shouldRebroadcastTransactions() {
|
if f.shouldRebroadcastTransactions() {
|
||||||
txIDsToRebroadcast = f.txIDsToRebroadcast()
|
txsToRebroadcast, err := f.Domain().MiningManager().RevalidateHighPriorityTransactions()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
txIDsToRebroadcast = consensushashing.TransactionIDs(txsToRebroadcast)
|
||||||
|
f.lastRebroadcastTime = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
|
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
|
||||||
@ -91,20 +92,12 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
|||||||
for i, txID := range txIDsToRebroadcast {
|
for i, txID := range txIDsToRebroadcast {
|
||||||
txIDsToBroadcast[offset+i] = txID
|
txIDsToBroadcast[offset+i] = txID
|
||||||
}
|
}
|
||||||
|
return f.EnqueueTransactionIDsForPropagation(txIDsToBroadcast)
|
||||||
if len(txIDsToBroadcast) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if len(txIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
|
|
||||||
txIDsToBroadcast = txIDsToBroadcast[:appmessage.MaxInvPerTxInvMsg]
|
|
||||||
}
|
|
||||||
inv := appmessage.NewMsgInvTransaction(txIDsToBroadcast)
|
|
||||||
return f.Broadcast(inv)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
|
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
|
||||||
// data about requested blocks between different peers.
|
// data about requested blocks between different peers.
|
||||||
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
|
func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks {
|
||||||
return f.sharedRequestedBlocks
|
return f.sharedRequestedBlocks
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -114,14 +107,18 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
|
|||||||
return protocolerrors.Errorf(false, "cannot add header only block")
|
return protocolerrors.Errorf(false, "cannot add header only block")
|
||||||
}
|
}
|
||||||
|
|
||||||
blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
|
err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
|
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = f.OnNewBlock(block, blockInsertionResult)
|
err = f.OnNewBlockTemplate()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = f.OnNewBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -146,7 +143,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
f.ibdPeer = ibdPeer
|
f.ibdPeer = ibdPeer
|
||||||
log.Infof("IBD started")
|
log.Infof("IBD started with peer %s", ibdPeer)
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -161,7 +158,6 @@ func (f *FlowContext) UnsetIBDRunning() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
f.ibdPeer = nil
|
f.ibdPeer = nil
|
||||||
log.Infof("IBD finished")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IBDPeer returns the current IBD peer or null if the node is not
|
// IBDPeer returns the current IBD peer or null if the node is not
|
||||||
|
@ -2,6 +2,7 @@ package flowcontext
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
@ -9,6 +10,11 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrPingTimeout signifies that a ping operation timed out.
|
||||||
|
ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
|
||||||
|
)
|
||||||
|
|
||||||
// HandleError handles an error from a flow,
|
// HandleError handles an error from a flow,
|
||||||
// It sends the error to errChan if isStopping == 0 and increments isStopping
|
// It sends the error to errChan if isStopping == 0 and increments isStopping
|
||||||
//
|
//
|
||||||
@ -21,11 +27,23 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
|
|||||||
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
if errors.Is(err, ErrPingTimeout) {
|
||||||
log.Errorf("error from %s: %s", flowName, err)
|
// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
|
||||||
|
log.Errorf("error from %s: %s", flowName, err)
|
||||||
|
} else {
|
||||||
|
// Explain to the user that this is not a panic, but only a protocol error with a specific peer
|
||||||
|
logFrame := strings.Repeat("=", 52)
|
||||||
|
log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
|
||||||
|
flowName, logFrame, err, logFrame)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||||
errChan <- err
|
errChan <- err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsRecoverableError returns whether the error is recoverable
|
||||||
|
func (*FlowContext) IsRecoverableError(err error) bool {
|
||||||
|
return err == nil || errors.Is(err, router.ErrRouteClosed) || errors.As(err, &protocolerrors.ProtocolError{})
|
||||||
|
}
|
||||||
|
@ -1,16 +1,15 @@
|
|||||||
package flowcontext
|
package flowcontext
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/util/mstime"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/kaspanet/kaspad/util/mstime"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
|
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||||
@ -19,9 +18,8 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OnBlockAddedToDAGHandler is a handler function that's triggered
|
// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
|
||||||
// when a block is added to the DAG
|
type OnNewBlockTemplateHandler func() error
|
||||||
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
|
|
||||||
|
|
||||||
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
|
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
|
||||||
// resets due to pruning point change via IBD.
|
// resets due to pruning point change via IBD.
|
||||||
@ -42,16 +40,14 @@ type FlowContext struct {
|
|||||||
|
|
||||||
timeStarted int64
|
timeStarted int64
|
||||||
|
|
||||||
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
|
onNewBlockTemplateHandler OnNewBlockTemplateHandler
|
||||||
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
|
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
|
||||||
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
||||||
|
|
||||||
transactionsToRebroadcastLock sync.Mutex
|
lastRebroadcastTime time.Time
|
||||||
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
|
sharedRequestedTransactions *SharedRequestedTransactions
|
||||||
lastRebroadcastTime time.Time
|
|
||||||
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
|
|
||||||
|
|
||||||
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
|
sharedRequestedBlocks *SharedRequestedBlocks
|
||||||
|
|
||||||
ibdPeer *peerpkg.Peer
|
ibdPeer *peerpkg.Peer
|
||||||
ibdPeerMutex sync.RWMutex
|
ibdPeerMutex sync.RWMutex
|
||||||
@ -62,6 +58,10 @@ type FlowContext struct {
|
|||||||
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
|
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
|
||||||
orphansMutex sync.RWMutex
|
orphansMutex sync.RWMutex
|
||||||
|
|
||||||
|
transactionIDsToPropagate []*externalapi.DomainTransactionID
|
||||||
|
lastTransactionIDPropagationTime time.Time
|
||||||
|
transactionIDPropagationLock sync.Mutex
|
||||||
|
|
||||||
shutdownChan chan struct{}
|
shutdownChan chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -70,18 +70,19 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
|
|||||||
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {
|
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {
|
||||||
|
|
||||||
return &FlowContext{
|
return &FlowContext{
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
netAdapter: netAdapter,
|
netAdapter: netAdapter,
|
||||||
domain: domain,
|
domain: domain,
|
||||||
addressManager: addressManager,
|
addressManager: addressManager,
|
||||||
connectionManager: connectionManager,
|
connectionManager: connectionManager,
|
||||||
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
|
sharedRequestedTransactions: NewSharedRequestedTransactions(),
|
||||||
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
|
sharedRequestedBlocks: NewSharedRequestedBlocks(),
|
||||||
peers: make(map[id.ID]*peerpkg.Peer),
|
peers: make(map[id.ID]*peerpkg.Peer),
|
||||||
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
|
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
|
||||||
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
|
timeStarted: mstime.Now().UnixMilliseconds(),
|
||||||
timeStarted: mstime.Now().UnixMilliseconds(),
|
transactionIDsToPropagate: []*externalapi.DomainTransactionID{},
|
||||||
shutdownChan: make(chan struct{}),
|
lastTransactionIDPropagationTime: time.Now(),
|
||||||
|
shutdownChan: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -96,9 +97,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
|
|||||||
return f.shutdownChan
|
return f.shutdownChan
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
|
||||||
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
|
func (f *FlowContext) IsNearlySynced() (bool, error) {
|
||||||
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
|
return f.Domain().Consensus().IsNearlySynced()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
|
||||||
|
func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
|
||||||
|
f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
|
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
|
||||||
|
@ -72,3 +72,10 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
|
|||||||
}
|
}
|
||||||
return peers
|
return peers
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasPeers returns whether there are currently active peers
|
||||||
|
func (f *FlowContext) HasPeers() bool {
|
||||||
|
f.peersMutex.RLock()
|
||||||
|
defer f.peersMutex.RUnlock()
|
||||||
|
return len(f.peers) > 0
|
||||||
|
}
|
||||||
|
@ -15,12 +15,6 @@ import (
|
|||||||
// on: 2^orphanResolutionRange * PHANTOM K.
|
// on: 2^orphanResolutionRange * PHANTOM K.
|
||||||
const maxOrphans = 600
|
const maxOrphans = 600
|
||||||
|
|
||||||
// UnorphaningResult is the result of unorphaning a block
|
|
||||||
type UnorphaningResult struct {
|
|
||||||
block *externalapi.DomainBlock
|
|
||||||
blockInsertionResult *externalapi.BlockInsertionResult
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddOrphan adds the block to the orphan set
|
// AddOrphan adds the block to the orphan set
|
||||||
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
||||||
f.orphansMutex.Lock()
|
f.orphansMutex.Lock()
|
||||||
@ -57,7 +51,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
|
// UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
|
||||||
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
|
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
|
||||||
f.orphansMutex.Lock()
|
f.orphansMutex.Lock()
|
||||||
defer f.orphansMutex.Unlock()
|
defer f.orphansMutex.Unlock()
|
||||||
|
|
||||||
@ -66,17 +60,17 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
|
|||||||
rootBlockHash := consensushashing.BlockHash(rootBlock)
|
rootBlockHash := consensushashing.BlockHash(rootBlock)
|
||||||
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
|
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
|
||||||
|
|
||||||
var unorphaningResults []*UnorphaningResult
|
var unorphanedBlocks []*externalapi.DomainBlock
|
||||||
for len(processQueue) > 0 {
|
for len(processQueue) > 0 {
|
||||||
var orphanHash externalapi.DomainHash
|
var orphanHash externalapi.DomainHash
|
||||||
orphanHash, processQueue = processQueue[0], processQueue[1:]
|
orphanHash, processQueue = processQueue[0], processQueue[1:]
|
||||||
orphanBlock := f.orphans[orphanHash]
|
orphanBlock := f.orphans[orphanHash]
|
||||||
|
|
||||||
log.Debugf("Considering to unorphan block %s with parents %s",
|
log.Debugf("Considering to unorphan block %s with parents %s",
|
||||||
orphanHash, orphanBlock.Header.ParentHashes())
|
orphanHash, orphanBlock.Header.DirectParents())
|
||||||
|
|
||||||
canBeUnorphaned := true
|
canBeUnorphaned := true
|
||||||
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
|
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
|
||||||
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
|
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -90,21 +84,18 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if canBeUnorphaned {
|
if canBeUnorphaned {
|
||||||
blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
|
unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if unorphaningSucceeded {
|
if unorphaningSucceeded {
|
||||||
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
|
unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
|
||||||
block: orphanBlock,
|
|
||||||
blockInsertionResult: blockInsertionResult,
|
|
||||||
})
|
|
||||||
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
|
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return unorphaningResults, nil
|
return unorphanedBlocks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
|
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
|
||||||
@ -133,7 +124,7 @@ func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.Domai
|
|||||||
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
|
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
|
||||||
var childOrphans []externalapi.DomainHash
|
var childOrphans []externalapi.DomainHash
|
||||||
for orphanHash, orphanBlock := range f.orphans {
|
for orphanHash, orphanBlock := range f.orphans {
|
||||||
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
|
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
|
||||||
if orphanBlockParentHash.Equal(blockHash) {
|
if orphanBlockParentHash.Equal(blockHash) {
|
||||||
childOrphans = append(childOrphans, orphanHash)
|
childOrphans = append(childOrphans, orphanHash)
|
||||||
break
|
break
|
||||||
@ -143,24 +134,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
|
|||||||
return childOrphans
|
return childOrphans
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
|
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
|
||||||
orphanBlock, ok := f.orphans[orphanHash]
|
orphanBlock, ok := f.orphans[orphanHash]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
|
return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
|
||||||
}
|
}
|
||||||
delete(f.orphans, orphanHash)
|
delete(f.orphans, orphanHash)
|
||||||
|
|
||||||
blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
|
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
|
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
|
||||||
return nil, false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return nil, false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Unorphaned block %s", orphanHash)
|
log.Infof("Unorphaned block %s", orphanHash)
|
||||||
return blockInsertionResult, true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
|
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
|
||||||
@ -201,7 +192,7 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, parent := range block.Header.ParentHashes() {
|
for _, parent := range block.Header.DirectParents() {
|
||||||
if !addedToQueueSet.Contains(parent) {
|
if !addedToQueueSet.Contains(parent) {
|
||||||
queue = append(queue, parent)
|
queue = append(queue, parent)
|
||||||
addedToQueueSet.Add(parent)
|
addedToQueueSet.Add(parent)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
package blockrelay
|
package flowcontext
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
@ -13,13 +13,15 @@ type SharedRequestedBlocks struct {
|
|||||||
sync.Mutex
|
sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
|
// Remove removes a block from the set.
|
||||||
|
func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
delete(s.blocks, *hash)
|
delete(s.blocks, *hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
|
// RemoveSet removes a set of blocks from the set.
|
||||||
|
func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
for hash := range blockHashes {
|
for hash := range blockHashes {
|
||||||
@ -27,7 +29,8 @@ func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
|
// AddIfNotExists adds a block to the set if it doesn't exist yet.
|
||||||
|
func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
_, ok := s.blocks[*hash]
|
_, ok := s.blocks[*hash]
|
@ -1,4 +1,4 @@
|
|||||||
package transactionrelay
|
package flowcontext
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
@ -13,13 +13,15 @@ type SharedRequestedTransactions struct {
|
|||||||
sync.Mutex
|
sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedTransactions) remove(txID *externalapi.DomainTransactionID) {
|
// Remove removes a transaction from the set.
|
||||||
|
func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
delete(s.transactions, *txID)
|
delete(s.transactions, *txID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
|
// RemoveMany removes a set of transactions from the set.
|
||||||
|
func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
for _, txID := range txIDs {
|
for _, txID := range txIDs {
|
||||||
@ -27,7 +29,8 @@ func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTran
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
|
// AddIfNotExists adds a transaction to the set if it doesn't exist yet.
|
||||||
|
func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
_, ok := s.transactions[*txID]
|
_, ok := s.transactions[*txID]
|
@ -1,43 +0,0 @@
|
|||||||
package flowcontext
|
|
||||||
|
|
||||||
import "github.com/kaspanet/kaspad/util/mstime"
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
|
|
||||||
)
|
|
||||||
|
|
||||||
// ShouldMine returns whether it's ok to use block template from this node
|
|
||||||
// for mining purposes.
|
|
||||||
func (f *FlowContext) ShouldMine() (bool, error) {
|
|
||||||
peers := f.Peers()
|
|
||||||
if len(peers) == 0 {
|
|
||||||
log.Debugf("The node is not connected, so ShouldMine returns false")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.IsIBDRunning() {
|
|
||||||
log.Debugf("IBD is running, so ShouldMine returns false")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
now := mstime.Now().UnixMilliseconds()
|
|
||||||
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
|
|
||||||
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
|
|
||||||
virtualSelectedParentHeader.TimeInMilliseconds())
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
|
|
||||||
virtualSelectedParentHeader.TimeInMilliseconds())
|
|
||||||
return false, nil
|
|
||||||
}
|
|
@ -4,39 +4,22 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AddTransaction adds transaction to the mempool and propagates it.
|
// TransactionIDPropagationInterval is the interval between transaction IDs propagations
|
||||||
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
|
const TransactionIDPropagationInterval = 500 * time.Millisecond
|
||||||
f.transactionsToRebroadcastLock.Lock()
|
|
||||||
defer f.transactionsToRebroadcastLock.Unlock()
|
|
||||||
|
|
||||||
err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, false)
|
// AddTransaction adds transaction to the mempool and propagates it.
|
||||||
|
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error {
|
||||||
|
acceptedTransactions, err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, true, allowOrphan)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
transactionID := consensushashing.TransactionID(tx)
|
acceptedTransactionIDs := consensushashing.TransactionIDs(acceptedTransactions)
|
||||||
f.transactionsToRebroadcast[*transactionID] = tx
|
return f.EnqueueTransactionIDsForPropagation(acceptedTransactionIDs)
|
||||||
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
|
|
||||||
return f.Broadcast(inv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FlowContext) updateTransactionsToRebroadcast(addedBlocks []*externalapi.DomainBlock) {
|
|
||||||
f.transactionsToRebroadcastLock.Lock()
|
|
||||||
defer f.transactionsToRebroadcastLock.Unlock()
|
|
||||||
|
|
||||||
for _, block := range addedBlocks {
|
|
||||||
// Note: if a transaction is included in the DAG but not accepted,
|
|
||||||
// it won't be rebroadcast anymore, although it is not included in
|
|
||||||
// the UTXO set
|
|
||||||
for _, tx := range block.Transactions {
|
|
||||||
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FlowContext) shouldRebroadcastTransactions() bool {
|
func (f *FlowContext) shouldRebroadcastTransactions() bool {
|
||||||
@ -44,22 +27,9 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
|
|||||||
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
|
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
|
|
||||||
f.transactionsToRebroadcastLock.Lock()
|
|
||||||
defer f.transactionsToRebroadcastLock.Unlock()
|
|
||||||
|
|
||||||
txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
|
|
||||||
i := 0
|
|
||||||
for _, tx := range f.transactionsToRebroadcast {
|
|
||||||
txIDs[i] = consensushashing.TransactionID(tx)
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
return txIDs
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
|
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
|
||||||
// data about requested transactions between different peers.
|
// data about requested transactions between different peers.
|
||||||
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
|
func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions {
|
||||||
return f.sharedRequestedTransactions
|
return f.sharedRequestedTransactions
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -70,3 +40,42 @@ func (f *FlowContext) OnTransactionAddedToMempool() {
|
|||||||
f.onTransactionAddedToMempoolHandler()
|
f.onTransactionAddedToMempoolHandler()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EnqueueTransactionIDsForPropagation add the given transactions IDs to a set of IDs to
|
||||||
|
// propagate. The IDs will be broadcast to all peers within a single transaction Inv message.
|
||||||
|
// The broadcast itself may happen only during a subsequent call to this method
|
||||||
|
func (f *FlowContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
|
||||||
|
f.transactionIDPropagationLock.Lock()
|
||||||
|
defer f.transactionIDPropagationLock.Unlock()
|
||||||
|
|
||||||
|
f.transactionIDsToPropagate = append(f.transactionIDsToPropagate, transactionIDs...)
|
||||||
|
|
||||||
|
return f.maybePropagateTransactions()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FlowContext) maybePropagateTransactions() error {
|
||||||
|
if time.Since(f.lastTransactionIDPropagationTime) < TransactionIDPropagationInterval &&
|
||||||
|
len(f.transactionIDsToPropagate) < appmessage.MaxInvPerTxInvMsg {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for len(f.transactionIDsToPropagate) > 0 {
|
||||||
|
transactionIDsToBroadcast := f.transactionIDsToPropagate
|
||||||
|
if len(transactionIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
|
||||||
|
transactionIDsToBroadcast = f.transactionIDsToPropagate[:len(transactionIDsToBroadcast)]
|
||||||
|
}
|
||||||
|
log.Debugf("Transaction propagation: broadcasting %d transactions", len(transactionIDsToBroadcast))
|
||||||
|
|
||||||
|
inv := appmessage.NewMsgInvTransaction(transactionIDsToBroadcast)
|
||||||
|
err := f.Broadcast(inv)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
f.transactionIDsToPropagate = f.transactionIDsToPropagate[len(transactionIDsToBroadcast):]
|
||||||
|
}
|
||||||
|
|
||||||
|
f.lastTransactionIDPropagationTime = time.Now()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
@ -1,29 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
|
|
||||||
highHash *externalapi.DomainHash, limit uint32) error {
|
|
||||||
|
|
||||||
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
|
|
||||||
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
|
|
||||||
if !ok {
|
|
||||||
return nil,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
|
|
||||||
}
|
|
||||||
return msgBlockLocator.BlockLocatorHashes, nil
|
|
||||||
}
|
|
@ -1,51 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandlePruningPointHashRequestsFlowContext is the interface for the context needed for the handlePruningPointHashRequestsFlow flow.
|
|
||||||
type HandlePruningPointHashRequestsFlowContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
type handlePruningPointHashRequestsFlow struct {
|
|
||||||
HandlePruningPointHashRequestsFlowContext
|
|
||||||
incomingRoute, outgoingRoute *router.Route
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlePruningPointHashRequests listens to appmessage.MsgRequestPruningPointHashMessage messages and sends
|
|
||||||
// the pruning point hash as response.
|
|
||||||
func HandlePruningPointHashRequests(context HandlePruningPointHashRequestsFlowContext, incomingRoute,
|
|
||||||
outgoingRoute *router.Route) error {
|
|
||||||
flow := &handlePruningPointHashRequestsFlow{
|
|
||||||
HandlePruningPointHashRequestsFlowContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
}
|
|
||||||
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handlePruningPointHashRequestsFlow) start() error {
|
|
||||||
for {
|
|
||||||
_, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Got request for a pruning point hash")
|
|
||||||
|
|
||||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewPruningPointHashMessage(pruningPoint))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Sent pruning point hash %s", pruningPoint)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,144 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandleRequestPruningPointUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestPruningPointUTXOSetAndBlock flow.
|
|
||||||
type HandleRequestPruningPointUTXOSetAndBlockContext interface {
|
|
||||||
Domain() domain.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
type handleRequestPruningPointUTXOSetAndBlockFlow struct {
|
|
||||||
HandleRequestPruningPointUTXOSetAndBlockContext
|
|
||||||
incomingRoute, outgoingRoute *router.Route
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleRequestPruningPointUTXOSetAndBlock listens to appmessage.MsgRequestPruningPointUTXOSetAndBlock messages and sends
|
|
||||||
// the pruning point UTXO set and block body.
|
|
||||||
func HandleRequestPruningPointUTXOSetAndBlock(context HandleRequestPruningPointUTXOSetAndBlockContext, incomingRoute,
|
|
||||||
outgoingRoute *router.Route) error {
|
|
||||||
flow := &handleRequestPruningPointUTXOSetAndBlockFlow{
|
|
||||||
HandleRequestPruningPointUTXOSetAndBlockContext: context,
|
|
||||||
incomingRoute: incomingRoute,
|
|
||||||
outgoingRoute: outgoingRoute,
|
|
||||||
}
|
|
||||||
|
|
||||||
return flow.start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) start() error {
|
|
||||||
for {
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock, err := flow.waitForRequestPruningPointUTXOSetAndBlockMessages()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.handleRequestPruningPointUTXOSetAndBlockMessage(msgRequestPruningPointUTXOSetAndBlock)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) handleRequestPruningPointUTXOSetAndBlockMessage(
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
|
||||||
|
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetAndBlockFlow")
|
|
||||||
defer onEnd()
|
|
||||||
|
|
||||||
log.Debugf("Got request for PruningPointHash UTXOSet and Block")
|
|
||||||
|
|
||||||
err := flow.sendPruningPointBlock(msgRequestPruningPointUTXOSetAndBlock)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSetAndBlock)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) waitForRequestPruningPointUTXOSetAndBlockMessages() (
|
|
||||||
*appmessage.MsgRequestPruningPointUTXOSetAndBlock, error) {
|
|
||||||
|
|
||||||
message, err := flow.incomingRoute.Dequeue()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock, ok := message.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
|
||||||
if !ok {
|
|
||||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSetAndBlock, message.Command())
|
|
||||||
}
|
|
||||||
return msgRequestPruningPointUTXOSetAndBlock, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointBlock(
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
|
||||||
|
|
||||||
block, err := flow.Domain().Consensus().GetBlock(msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Retrieved pruning block %s", msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
|
||||||
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(block)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointUTXOSet(
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
|
||||||
|
|
||||||
// Send the UTXO set in `step`-sized chunks
|
|
||||||
const step = 1000
|
|
||||||
var fromOutpoint *externalapi.DomainOutpoint
|
|
||||||
chunksSent := 0
|
|
||||||
for {
|
|
||||||
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash, fromOutpoint, step)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("Retrieved %d UTXOs for pruning block %s",
|
|
||||||
len(pruningPointUTXOs), msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
|
||||||
|
|
||||||
outpointAndUTXOEntryPairs :=
|
|
||||||
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pruningPointUTXOs) < step {
|
|
||||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
|
||||||
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
|
||||||
|
|
||||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
|
||||||
}
|
|
||||||
|
|
||||||
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
|
|
||||||
chunksSent++
|
|
||||||
|
|
||||||
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
|
|
||||||
if chunksSent%ibdBatchSize == 0 {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
|
|
||||||
if !ok {
|
|
||||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,577 +0,0 @@
|
|||||||
package blockrelay
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
|
|
||||||
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
|
|
||||||
if !wasIBDNotRunning {
|
|
||||||
log.Debugf("IBD is already running")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
defer flow.UnsetIBDRunning()
|
|
||||||
|
|
||||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
|
||||||
|
|
||||||
log.Debugf("Syncing headers up to %s", highHash)
|
|
||||||
headersSynced, err := flow.syncHeaders(highHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !headersSynced {
|
|
||||||
log.Debugf("Aborting IBD because the headers failed to sync")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Debugf("Finished syncing headers up to %s", highHash)
|
|
||||||
|
|
||||||
log.Debugf("Syncing the current pruning point UTXO set")
|
|
||||||
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !syncedPruningPointUTXOSetSuccessfully {
|
|
||||||
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Debugf("Finished syncing the current pruning point UTXO set")
|
|
||||||
|
|
||||||
log.Debugf("Downloading block bodies up to %s", highHash)
|
|
||||||
err = flow.syncMissingBlockBodies(highHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("Finished downloading block bodies up to %s", highHash)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncHeaders attempts to sync headers from the peer. This method may fail
|
|
||||||
// because the peer and us have conflicting pruning points. In that case we
|
|
||||||
// return (false, nil) so that we may stop IBD gracefully.
|
|
||||||
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) (bool, error) {
|
|
||||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
|
||||||
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if !highestSharedBlockFound {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
|
||||||
|
|
||||||
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the highHash has not been received, the peer is misbehaving
|
|
||||||
highHashBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if !highHashBlockInfo.Exists {
|
|
||||||
return false, protocolerrors.Errorf(true, "did not receive "+
|
|
||||||
"highHash header %s from peer %s during header download", highHash, flow.peer)
|
|
||||||
}
|
|
||||||
log.Debugf("Headers downloaded from peer %s", flow.peer)
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
|
||||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
|
||||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
|
||||||
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
|
|
||||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
|
||||||
|
|
||||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
|
||||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
if !highestHashFound {
|
|
||||||
return nil, false, nil
|
|
||||||
}
|
|
||||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if highestHashIndex == 0 ||
|
|
||||||
// If the block locator contains only two adjacent chain blocks, the
|
|
||||||
// syncer will always find the same highest chain block, so to avoid
|
|
||||||
// an endless loop, we explicitly stop the loop in such situation.
|
|
||||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
|
||||||
|
|
||||||
return highestHash, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
locatorHashAboveHighestHash := highestHash
|
|
||||||
if highestHashIndex > 0 {
|
|
||||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
|
||||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
|
||||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
|
||||||
"restarting with full block locator")
|
|
||||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return blockLocator, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) findHighestHashIndex(
|
|
||||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
|
||||||
|
|
||||||
highestHashIndex := 0
|
|
||||||
highestHashIndexFound := false
|
|
||||||
for i, blockLocatorHash := range blockLocator {
|
|
||||||
if highestHash.Equal(blockLocatorHash) {
|
|
||||||
highestHashIndex = i
|
|
||||||
highestHashIndexFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !highestHashIndexFound {
|
|
||||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
|
||||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
|
||||||
}
|
|
||||||
log.Debugf("The index of the highest hash in the original "+
|
|
||||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
|
||||||
|
|
||||||
return highestHashIndex, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
|
||||||
// blockLocator. This method may fail because the peer and us have conflicting pruning
|
|
||||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
|
||||||
func (flow *handleRelayInvsFlow) fetchHighestHash(
|
|
||||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
|
||||||
|
|
||||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
|
||||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
|
||||||
highestHash := message.HighestHash
|
|
||||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
|
||||||
|
|
||||||
return highestHash, true, nil
|
|
||||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
|
||||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
|
||||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
|
||||||
return nil, false, nil
|
|
||||||
default:
|
|
||||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
|
||||||
highHash *externalapi.DomainHash) error {
|
|
||||||
|
|
||||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep a short queue of blockHeadersMessages so that there's
|
|
||||||
// never a moment when the node is not validating and inserting
|
|
||||||
// headers
|
|
||||||
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
|
|
||||||
errChan := make(chan error)
|
|
||||||
spawn("handleRelayInvsFlow-downloadHeaders", func() {
|
|
||||||
for {
|
|
||||||
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
|
|
||||||
if err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if doneIBD {
|
|
||||||
close(blockHeadersMessageChan)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
blockHeadersMessageChan <- blockHeadersMessage
|
|
||||||
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
|
||||||
if err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case blockHeadersMessage, ok := <-blockHeadersMessageChan:
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for _, header := range blockHeadersMessage.BlockHeaders {
|
|
||||||
err = flow.processHeader(header)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case err := <-errChan:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
|
||||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
|
||||||
|
|
||||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
|
||||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneIBD bool, err error) {
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.BlockHeadersMessage:
|
|
||||||
return message, false, nil
|
|
||||||
case *appmessage.MsgDoneHeaders:
|
|
||||||
return nil, true, nil
|
|
||||||
default:
|
|
||||||
return nil, false,
|
|
||||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
|
|
||||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
|
||||||
block := &externalapi.DomainBlock{
|
|
||||||
Header: header,
|
|
||||||
Transactions: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
blockHash := consensushashing.BlockHash(block)
|
|
||||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if blockInfo.Exists {
|
|
||||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
|
||||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
|
||||||
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
|
|
||||||
} else {
|
|
||||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
|
||||||
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet() (bool, error) {
|
|
||||||
log.Debugf("Checking if a new pruning point is available")
|
|
||||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointHashMessage())
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
msgPruningPointHash, ok := message.(*appmessage.MsgPruningPointHashMessage)
|
|
||||||
if !ok {
|
|
||||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdPruningPointHash, message.Command())
|
|
||||||
}
|
|
||||||
|
|
||||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgPruningPointHash.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !blockInfo.Exists {
|
|
||||||
return false, errors.Errorf("The pruning point header is missing")
|
|
||||||
}
|
|
||||||
|
|
||||||
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
|
||||||
log.Debugf("Already has the block data of the new suggested pruning point %s", msgPruningPointHash.Hash)
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgPruningPointHash.Hash)
|
|
||||||
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgPruningPointHash.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !isValid {
|
|
||||||
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
|
|
||||||
" peer", msgPruningPointHash.Hash)
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("Fetching the pruning point UTXO set")
|
|
||||||
succeed, err := flow.fetchMissingUTXOSet(msgPruningPointHash.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !succeed {
|
|
||||||
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("Fetched the new pruning point UTXO set")
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
|
|
||||||
defer func() {
|
|
||||||
err := flow.Domain().Consensus().ClearImportedPruningPointData()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash))
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
block, err := flow.receivePruningPointBlock()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(pruningPointHash)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if !receivedAll {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.Domain().Consensus().ValidateAndInsertImportedPruningPoint(block)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Find a better way to deal with finality conflicts.
|
|
||||||
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.OnPruningPointUTXOSetOverride()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) receivePruningPointBlock() (*externalapi.DomainBlock, error) {
|
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receivePruningPointBlock")
|
|
||||||
defer onEnd()
|
|
||||||
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ibdBlockMessage, ok := message.(*appmessage.MsgIBDBlock)
|
|
||||||
if !ok {
|
|
||||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
|
||||||
}
|
|
||||||
block := appmessage.MsgBlockToDomainBlock(ibdBlockMessage.MsgBlock)
|
|
||||||
|
|
||||||
log.Debugf("Received pruning point block %s", consensushashing.BlockHash(block))
|
|
||||||
|
|
||||||
return block, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
|
|
||||||
pruningPointHash *externalapi.DomainHash) (bool, error) {
|
|
||||||
|
|
||||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
|
|
||||||
defer onEnd()
|
|
||||||
|
|
||||||
receivedChunkCount := 0
|
|
||||||
receivedUTXOCount := 0
|
|
||||||
for {
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch message := message.(type) {
|
|
||||||
case *appmessage.MsgPruningPointUTXOSetChunk:
|
|
||||||
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
|
|
||||||
domainOutpointAndUTXOEntryPairs :=
|
|
||||||
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
|
|
||||||
|
|
||||||
err := flow.Domain().Consensus().AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
receivedChunkCount++
|
|
||||||
if receivedChunkCount%ibdBatchSize == 0 {
|
|
||||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
|
||||||
receivedChunkCount, receivedUTXOCount)
|
|
||||||
|
|
||||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
|
||||||
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case *appmessage.MsgDonePruningPointUTXOSetChunks:
|
|
||||||
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
|
|
||||||
return true, nil
|
|
||||||
|
|
||||||
case *appmessage.MsgUnexpectedPruningPoint:
|
|
||||||
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
|
|
||||||
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
|
|
||||||
return false, nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
|
|
||||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
|
|
||||||
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
|
|
||||||
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
|
|
||||||
// In these cases - GetMissingBlockBodyHashes would return an empty array.
|
|
||||||
log.Debugf("No missing block body hashes found.")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
|
||||||
var hashesToRequest []*externalapi.DomainHash
|
|
||||||
if offset+ibdBatchSize < len(hashes) {
|
|
||||||
hashesToRequest = hashes[offset : offset+ibdBatchSize]
|
|
||||||
} else {
|
|
||||||
hashesToRequest = hashes[offset:]
|
|
||||||
}
|
|
||||||
|
|
||||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, expectedHash := range hashesToRequest {
|
|
||||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
|
|
||||||
if !ok {
|
|
||||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
|
||||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
|
||||||
}
|
|
||||||
|
|
||||||
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
|
|
||||||
blockHash := consensushashing.BlockHash(block)
|
|
||||||
if !expectedHash.Equal(blockHash) {
|
|
||||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = flow.banIfBlockIsHeaderOnly(block)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
|
||||||
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
|
||||||
}
|
|
||||||
err = flow.OnNewBlock(block, blockInsertionResult)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
|
|
||||||
// IBD. Inv messages are expected to arrive at any given moment, but should be
|
|
||||||
// ignored while we're in IBD
|
|
||||||
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
|
|
||||||
for {
|
|
||||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
|
|
||||||
return message, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -28,7 +28,7 @@ type HandleHandshakeContext interface {
|
|||||||
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
|
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming
|
// HandleHandshake sets up the new_handshake protocol - It sends a version message and waits for an incoming
|
||||||
// version message, as well as a verack for the sent version
|
// version message, as well as a verack for the sent version
|
||||||
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
|
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
|
||||||
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
||||||
@ -98,7 +98,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
// Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
||||||
// Therefore we implement a separate handleError for handshake
|
// Therefore we implement a separate handleError for new_handshake
|
||||||
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
|
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
|
||||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -17,7 +18,9 @@ var (
|
|||||||
|
|
||||||
// minAcceptableProtocolVersion is the lowest protocol version that a
|
// minAcceptableProtocolVersion is the lowest protocol version that a
|
||||||
// connected peer may support.
|
// connected peer may support.
|
||||||
minAcceptableProtocolVersion = appmessage.ProtocolVersion
|
minAcceptableProtocolVersion = uint32(5)
|
||||||
|
|
||||||
|
maxAcceptableProtocolVersion = uint32(5)
|
||||||
)
|
)
|
||||||
|
|
||||||
type receiveVersionFlow struct {
|
type receiveVersionFlow struct {
|
||||||
@ -97,7 +100,12 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
|||||||
return nil, protocolerrors.New(false, "incompatible subnetworks")
|
return nil, protocolerrors.New(false, "incompatible subnetworks")
|
||||||
}
|
}
|
||||||
|
|
||||||
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
|
if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion {
|
||||||
|
return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
maxProtocolVersion := flow.Config().ProtocolVersion
|
||||||
|
flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion)
|
||||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
|
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/kaspanet/kaspad/version"
|
"github.com/kaspanet/kaspad/version"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -56,15 +57,18 @@ func (flow *sendVersionFlow) start() error {
|
|||||||
// Version message.
|
// Version message.
|
||||||
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
|
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
|
||||||
subnetworkID := flow.Config().SubnetworkID
|
subnetworkID := flow.Config().SubnetworkID
|
||||||
|
if flow.Config().ProtocolVersion < minAcceptableProtocolVersion {
|
||||||
|
return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion)
|
||||||
|
}
|
||||||
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
|
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
|
||||||
flow.Config().ActiveNetParams.Name, subnetworkID)
|
flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion)
|
||||||
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
|
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
|
||||||
|
|
||||||
// Advertise the services flag
|
// Advertise the services flag
|
||||||
msg.Services = defaultServices
|
msg.Services = defaultServices
|
||||||
|
|
||||||
// Advertise our max supported protocol version.
|
// Advertise our max supported protocol version.
|
||||||
msg.ProtocolVersion = appmessage.ProtocolVersion
|
msg.ProtocolVersion = flow.Config().ProtocolVersion
|
||||||
|
|
||||||
// Advertise if inv messages for transactions are desired.
|
// Advertise if inv messages for transactions are desired.
|
||||||
msg.DisableRelayTx = flow.Config().BlocksOnly
|
msg.DisableRelayTx = flow.Config().BlocksOnly
|
||||||
|
9
app/protocol/flows/ready/log.go
Normal file
9
app/protocol/flows/ready/log.go
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
package ready
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
|
"github.com/kaspanet/kaspad/util/panics"
|
||||||
|
)
|
||||||
|
|
||||||
|
// log is the ready package's logger, registered under the "PROT" subsystem.
var log = logger.RegisterSubSystem("PROT")

// spawn wraps goroutine creation so that panics are routed through log.
var spawn = panics.GoroutineWrapperFunc(log)
|
56
app/protocol/flows/ready/ready.go
Normal file
56
app/protocol/flows/ready/ready.go
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package ready
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HandleReady notify the other peer that peer is ready for messages, and wait for the other peer
|
||||||
|
// to send a ready message before start running the flows.
|
||||||
|
func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
||||||
|
peer *peerpkg.Peer,
|
||||||
|
) error {
|
||||||
|
|
||||||
|
log.Debugf("Sending ready message to %s", peer)
|
||||||
|
|
||||||
|
isStopping := uint32(0)
|
||||||
|
err := outgoingRoute.Enqueue(appmessage.NewMsgReady())
|
||||||
|
if err != nil {
|
||||||
|
return handleError(err, "HandleReady", &isStopping)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return handleError(err, "HandleReady", &isStopping)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Got ready message from %s", peer)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ready is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
||||||
|
// Therefore we implement a separate handleError for 'ready'
|
||||||
|
func handleError(err error, flowName string, isStopping *uint32) error {
|
||||||
|
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||||
|
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||||
|
log.Errorf("Ready protocol error from %s: %s", flowName, err)
|
||||||
|
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
16
app/protocol/flows/v5/blockrelay/batch_size_test.go
Normal file
16
app/protocol/flows/v5/blockrelay/batch_size_test.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
|
||||||
|
// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
|
||||||
|
// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
|
||||||
|
// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
|
||||||
|
if ibdBatchSize >= router.DefaultMaxMessages {
|
||||||
|
t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
|
||||||
|
ibdBatchSize, router.DefaultMaxMessages)
|
||||||
|
}
|
||||||
|
}
|
33
app/protocol/flows/v5/blockrelay/block_locator.go
Normal file
33
app/protocol/flows/v5/blockrelay/block_locator.go
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
|
||||||
|
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
|
||||||
|
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
|
||||||
|
for {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch message := message.(type) {
|
||||||
|
case *appmessage.MsgInvRelayBlock:
|
||||||
|
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||||
|
case *appmessage.MsgBlockLocator:
|
||||||
|
return message.BlockLocatorHashes, nil
|
||||||
|
default:
|
||||||
|
return nil,
|
||||||
|
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -33,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !blockInfo.Exists {
|
if !blockInfo.HasHeader() {
|
||||||
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
|
||||||
"with an unknown targetHash %s", targetHash)
|
"with an unknown targetHash %s", targetHash)
|
||||||
}
|
}
|
||||||
@ -44,7 +44,9 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !blockInfo.Exists {
|
|
||||||
|
// The IBD block locator is checking only existing blocks with bodies.
|
||||||
|
if !blockInfo.HasBody() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -4,7 +4,6 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@ -28,18 +27,15 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
|
|||||||
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
|
||||||
for i, hash := range msgRequestIBDBlocks.Hashes {
|
for i, hash := range msgRequestIBDBlocks.Hashes {
|
||||||
// Fetch the block from the database.
|
// Fetch the block from the database.
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "IBD block %s not found", hash)
|
||||||
|
}
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
// TODO (Partial nodes): Convert block to partial block if needed
|
||||||
|
|
||||||
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
blockMessage := appmessage.DomainBlockToMsgBlock(block)
|
||||||
@ -48,7 +44,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("sent %d out of %d", i, len(msgRequestIBDBlocks.Hashes))
|
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -0,0 +1,85 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
|
||||||
|
type RequestIBDChainBlockLocatorContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleRequestIBDChainBlockLocatorFlow struct {
|
||||||
|
RequestIBDChainBlockLocatorContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
|
||||||
|
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
|
||||||
|
outgoingRoute *router.Route) error {
|
||||||
|
|
||||||
|
flow := &handleRequestIBDChainBlockLocatorFlow{
|
||||||
|
RequestIBDChainBlockLocatorContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
}
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
|
||||||
|
for {
|
||||||
|
highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
|
||||||
|
|
||||||
|
var locator externalapi.BlockLocator
|
||||||
|
if highHash == nil || lowHash == nil {
|
||||||
|
locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||||
|
} else {
|
||||||
|
locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||||
|
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||||
|
// The chain has been modified, signal it by sending an empty locator
|
||||||
|
locator, err = externalapi.BlockLocator{}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
|
||||||
|
return protocolerrors.Errorf(true, "couldn't build a block "+
|
||||||
|
"locator between %s and %s", lowHash, highHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.sendIBDChainBlockLocator(locator)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {
|
||||||
|
|
||||||
|
message, err := flow.incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)
|
||||||
|
|
||||||
|
return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
|
||||||
|
msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
|
||||||
|
err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@ -0,0 +1,162 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
|
||||||
|
type PruningPointAndItsAnticoneRequestsContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
Config() *config.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
var isBusy uint32
|
||||||
|
|
||||||
|
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
|
||||||
|
// the pruning point and its anticone to the requesting peer.
|
||||||
|
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
|
||||||
|
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||||
|
|
||||||
|
for {
|
||||||
|
err := func() error {
|
||||||
|
_, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
|
||||||
|
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
|
||||||
|
}
|
||||||
|
defer atomic.StoreUint32(&isBusy, 0)
|
||||||
|
|
||||||
|
log.Debugf("Got request for pruning point and its anticone from %s", peer)
|
||||||
|
|
||||||
|
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
|
||||||
|
for i, header := range pruningPointHeaders {
|
||||||
|
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
|
||||||
|
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
|
||||||
|
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
|
||||||
|
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
|
||||||
|
|
||||||
|
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
|
||||||
|
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
|
||||||
|
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
|
||||||
|
for _, blockHash := range pointAndItsAnticone {
|
||||||
|
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
|
||||||
|
for i, daaBlockHash := range blockDAAWindowHashes {
|
||||||
|
index, exists := daaWindowHashesToIndex[*daaBlockHash]
|
||||||
|
if !exists {
|
||||||
|
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
|
||||||
|
index = len(daaWindowBlocks) - 1
|
||||||
|
daaWindowHashesToIndex[*daaBlockHash] = index
|
||||||
|
}
|
||||||
|
|
||||||
|
trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
|
||||||
|
}
|
||||||
|
|
||||||
|
ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
|
||||||
|
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
|
||||||
|
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
|
||||||
|
if !exists {
|
||||||
|
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
|
||||||
|
Hash: ghostdagDataBlockHash,
|
||||||
|
GHOSTDAGData: data,
|
||||||
|
})
|
||||||
|
index = len(ghostdagData) - 1
|
||||||
|
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
|
||||||
|
}
|
||||||
|
|
||||||
|
trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, blockHash := range pointAndItsAnticone {
|
||||||
|
block, found, err := context.Domain().Consensus().GetBlock(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i+1)%ibdBatchSize == 0 {
|
||||||
|
// No timeout here, as we don't care if the syncee takes its time computing,
|
||||||
|
// since it only blocks this dedicated flow
|
||||||
|
message, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
|
||||||
|
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Sent pruning point and its anticone to %s", peer)
|
||||||
|
return nil
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,40 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
|
||||||
|
type PruningPointProofRequestsContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
|
||||||
|
// the pruning point proof to the requesting peer.
|
||||||
|
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
|
||||||
|
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||||
|
|
||||||
|
for {
|
||||||
|
_, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Got request for pruning point proof from %s", peer)
|
||||||
|
|
||||||
|
pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
|
||||||
|
err = outgoingRoute.Enqueue(pruningPointProofMessage)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Sent pruning point proof to %s", peer)
|
||||||
|
}
|
||||||
|
}
|
@ -5,7 +5,6 @@ import (
|
|||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@ -29,18 +28,15 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
|
|||||||
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
|
||||||
for _, hash := range getRelayBlocksMessage.Hashes {
|
for _, hash := range getRelayBlocksMessage.Hashes {
|
||||||
// Fetch the block from the database.
|
// Fetch the block from the database.
|
||||||
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
|
block, found, err := context.Domain().Consensus().GetBlock(hash)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
|
||||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
|
||||||
}
|
|
||||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "Relay block %s not found", hash)
|
||||||
|
}
|
||||||
|
|
||||||
// TODO (Partial nodes): Convert block to partial block if needed
|
// TODO (Partial nodes): Convert block to partial block if needed
|
||||||
|
|
||||||
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
|
@ -3,12 +3,15 @@ package blockrelay
|
|||||||
import (
|
import (
|
||||||
"github.com/kaspanet/kaspad/app/appmessage"
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
"github.com/kaspanet/kaspad/domain"
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -23,23 +26,29 @@ var orphanResolutionRange uint32 = 5
|
|||||||
type RelayInvsContext interface {
|
type RelayInvsContext interface {
|
||||||
Domain() domain.Domain
|
Domain() domain.Domain
|
||||||
Config() *config.Config
|
Config() *config.Config
|
||||||
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
|
OnNewBlock(block *externalapi.DomainBlock) error
|
||||||
|
OnNewBlockTemplate() error
|
||||||
OnPruningPointUTXOSetOverride() error
|
OnPruningPointUTXOSetOverride() error
|
||||||
SharedRequestedBlocks() *SharedRequestedBlocks
|
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||||
Broadcast(message appmessage.Message) error
|
Broadcast(message appmessage.Message) error
|
||||||
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
||||||
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
|
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
|
||||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||||
IsIBDRunning() bool
|
IsIBDRunning() bool
|
||||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
IsRecoverableError(err error) bool
|
||||||
UnsetIBDRunning()
|
IsNearlySynced() (bool, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type invRelayBlock struct {
|
||||||
|
Hash *externalapi.DomainHash
|
||||||
|
IsOrphanRoot bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type handleRelayInvsFlow struct {
|
type handleRelayInvsFlow struct {
|
||||||
RelayInvsContext
|
RelayInvsContext
|
||||||
incomingRoute, outgoingRoute *router.Route
|
incomingRoute, outgoingRoute *router.Route
|
||||||
peer *peerpkg.Peer
|
peer *peerpkg.Peer
|
||||||
invsQueue []*appmessage.MsgInvRelayBlock
|
invsQueue []invRelayBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||||
@ -52,9 +61,12 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
|
|||||||
incomingRoute: incomingRoute,
|
incomingRoute: incomingRoute,
|
||||||
outgoingRoute: outgoingRoute,
|
outgoingRoute: outgoingRoute,
|
||||||
peer: peer,
|
peer: peer,
|
||||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
invsQueue: make([]invRelayBlock, 0),
|
||||||
}
|
}
|
||||||
return flow.start()
|
err := flow.start()
|
||||||
|
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||||
|
close(peer.IBDRequestChannel())
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) start() error {
|
func (flow *handleRelayInvsFlow) start() error {
|
||||||
@ -80,7 +92,18 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if flow.IsOrphan(inv.Hash) {
|
if flow.IsOrphan(inv.Hash) {
|
||||||
|
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
|
||||||
|
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
|
||||||
|
"to the recent pruning point before normal operation can resume.", inv.Hash)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
|
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
|
||||||
err := flow.AddOrphanRootsToQueue(inv.Hash)
|
err := flow.AddOrphanRootsToQueue(inv.Hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -89,10 +112,16 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Block relay is disabled during IBD
|
// Block relay is disabled if the node is already during IBD AND considered out of sync
|
||||||
if flow.IsIBDRunning() {
|
if flow.IsIBDRunning() {
|
||||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
isNearlySynced, err := flow.IsNearlySynced()
|
||||||
continue
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !isNearlySynced {
|
||||||
|
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("Requesting block %s", inv.Hash)
|
log.Debugf("Requesting block %s", inv.Hash)
|
||||||
@ -110,8 +139,41 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
|
||||||
|
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
|
||||||
|
// that means the process started by a proper and relevant relay block
|
||||||
|
if !inv.IsOrphanRoot {
|
||||||
|
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
|
||||||
|
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
|
||||||
|
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Since `BlueWork` respects topology, this condition means that the relay
|
||||||
|
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
|
||||||
|
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
|
||||||
|
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
|
||||||
|
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
|
||||||
|
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("Processing block %s", inv.Hash)
|
log.Debugf("Processing block %s", inv.Hash)
|
||||||
missingParents, blockInsertionResult, err := flow.processBlock(block)
|
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
missingParents, err := flow.processBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||||
@ -126,20 +188,55 @@ func (flow *handleRelayInvsFlow) start() error {
|
|||||||
}
|
}
|
||||||
if len(missingParents) > 0 {
|
if len(missingParents) > 0 {
|
||||||
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
|
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
|
||||||
err := flow.processOrphan(block, missingParents)
|
err := flow.processOrphan(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("Relaying block %s", inv.Hash)
|
oldVirtualParents := hashset.New()
|
||||||
err = flow.relayBlock(block)
|
for _, parent := range oldVirtualInfo.ParentHashes {
|
||||||
|
oldVirtualParents.Add(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
virtualHasNewParents := false
|
||||||
|
for _, parent := range newVirtualInfo.ParentHashes {
|
||||||
|
if oldVirtualParents.Contains(parent) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
virtualHasNewParents = true
|
||||||
|
block, found, err := flow.Domain().Consensus().GetBlock(parent)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
|
||||||
|
}
|
||||||
|
blockHash := consensushashing.BlockHash(block)
|
||||||
|
log.Debugf("Relaying block %s", blockHash)
|
||||||
|
err = flow.relayBlock(block)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if virtualHasNewParents {
|
||||||
|
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
|
||||||
|
err = flow.OnNewBlockTemplate()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||||
err = flow.OnNewBlock(block, blockInsertionResult)
|
err = flow.OnNewBlock(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -155,35 +252,35 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
|
||||||
if len(flow.invsQueue) > 0 {
|
if len(flow.invsQueue) > 0 {
|
||||||
var inv *appmessage.MsgInvRelayBlock
|
var inv invRelayBlock
|
||||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||||
return inv, nil
|
return inv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
msg, err := flow.incomingRoute.Dequeue()
|
msg, err := flow.incomingRoute.Dequeue()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return invRelayBlock{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||||
"expecting an inv message", msg.Command())
|
"expecting an inv message", msg.Command())
|
||||||
}
|
}
|
||||||
return inv, nil
|
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||||
exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
|
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
|
||||||
if exists {
|
if exists {
|
||||||
return nil, true, nil
|
return nil, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
|
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
|
||||||
// clean from any pending blocks.
|
// clean from any pending blocks.
|
||||||
defer flow.SharedRequestedBlocks().remove(requestHash)
|
defer flow.SharedRequestedBlocks().Remove(requestHash)
|
||||||
|
|
||||||
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
|
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
|
||||||
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
|
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
|
||||||
@ -217,7 +314,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
|||||||
|
|
||||||
switch message := message.(type) {
|
switch message := message.(type) {
|
||||||
case *appmessage.MsgInvRelayBlock:
|
case *appmessage.MsgInvRelayBlock:
|
||||||
flow.invsQueue = append(flow.invsQueue, message)
|
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||||
case *appmessage.MsgBlock:
|
case *appmessage.MsgBlock:
|
||||||
return message, nil
|
return message, nil
|
||||||
default:
|
default:
|
||||||
@ -226,22 +323,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
|
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
|
||||||
blockHash := consensushashing.BlockHash(block)
|
blockHash := consensushashing.BlockHash(block)
|
||||||
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||||
if errors.As(err, missingParentsError) {
|
if errors.As(err, missingParentsError) {
|
||||||
return missingParentsError.MissingParentHashes, nil, nil
|
return missingParentsError.MissingParentHashes, nil
|
||||||
}
|
}
|
||||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
|
||||||
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||||
|
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||||
|
}
|
||||||
|
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||||
}
|
}
|
||||||
return nil, blockInsertionResult, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||||
@ -249,7 +349,7 @@ func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) erro
|
|||||||
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) error {
|
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
|
||||||
blockHash := consensushashing.BlockHash(block)
|
blockHash := consensushashing.BlockHash(block)
|
||||||
|
|
||||||
// Return if the block has been orphaned from elsewhere already
|
// Return if the block has been orphaned from elsewhere already
|
||||||
@ -264,6 +364,19 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if isBlockInOrphanResolutionRange {
|
if isBlockInOrphanResolutionRange {
|
||||||
|
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
|
||||||
|
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGenesisVirtualSelectedParent {
|
||||||
|
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
|
||||||
|
"to the recent pruning point before normal operation can resume.", blockHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("Block %s is within orphan resolution range. "+
|
log.Debugf("Block %s is within orphan resolution range. "+
|
||||||
"Adding it to the orphan set", blockHash)
|
"Adding it to the orphan set", blockHash)
|
||||||
flow.AddOrphan(block)
|
flow.AddOrphan(block)
|
||||||
@ -274,7 +387,28 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
|
|||||||
// Start IBD unless we already are in IBD
|
// Start IBD unless we already are in IBD
|
||||||
log.Debugf("Block %s is out of orphan resolution range. "+
|
log.Debugf("Block %s is out of orphan resolution range. "+
|
||||||
"Attempting to start IBD against it.", blockHash)
|
"Attempting to start IBD against it.", blockHash)
|
||||||
return flow.runIBDIfNotRunning(blockHash)
|
|
||||||
|
// Send the block to IBD flow via the IBDRequestChannel.
|
||||||
|
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
|
||||||
|
select {
|
||||||
|
case flow.peer.IBDRequestChannel() <- block:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||||
|
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
|
||||||
|
parents := block.Header.DirectParents()
|
||||||
|
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
|
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
|
||||||
@ -283,8 +417,7 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
|
|||||||
// In the response, if we know none of the hashes, we should retrieve the given
|
// In the response, if we know none of the hashes, we should retrieve the given
|
||||||
// blockHash via IBD. Otherwise, via unorphaning.
|
// blockHash via IBD. Otherwise, via unorphaning.
|
||||||
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
|
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
|
||||||
lowHash := flow.Config().ActiveNetParams.GenesisHash
|
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
|
||||||
err := flow.sendGetBlockLocator(lowHash, blockHash, orphanResolutionRange)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -316,12 +449,16 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
|
|||||||
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(orphanRoots) == 0 {
|
||||||
|
// In some rare cases we get here when there are no orphan roots already
|
||||||
|
return nil
|
||||||
|
}
|
||||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
||||||
|
|
||||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
invMessages := make([]invRelayBlock, len(orphanRoots))
|
||||||
for i, root := range orphanRoots {
|
for i, root := range orphanRoots {
|
||||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
||||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
|
||||||
}
|
}
|
||||||
|
|
||||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||||
|
type RequestAnticoneContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
Config() *config.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleRequestAnticoneFlow struct {
|
||||||
|
RequestAnticoneContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
peer *peer.Peer
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRequestAnticone handles RequestAnticone messages
|
||||||
|
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
|
||||||
|
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||||
|
|
||||||
|
flow := &handleRequestAnticoneFlow{
|
||||||
|
RequestAnticoneContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestAnticoneFlow) start() error {
|
||||||
|
for {
|
||||||
|
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
|
||||||
|
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
|
||||||
|
|
||||||
|
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
|
||||||
|
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
|
||||||
|
// we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
|
||||||
|
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
|
||||||
|
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
|
||||||
|
if err != nil {
|
||||||
|
return protocolerrors.Wrap(true, err, "Failed querying anticone")
|
||||||
|
}
|
||||||
|
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
|
||||||
|
|
||||||
|
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||||
|
for i, blockHash := range blockHashes {
|
||||||
|
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We sort the headers in bottom-up topological order before sending
|
||||||
|
sort.Slice(blockHeaders, func(i, j int) bool {
|
||||||
|
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
|
||||||
|
})
|
||||||
|
|
||||||
|
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
|
||||||
|
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
|
||||||
|
contextHash *externalapi.DomainHash, err error) {
|
||||||
|
|
||||||
|
message, err := incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
|
||||||
|
|
||||||
|
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
|
||||||
|
}
|
@ -32,20 +32,19 @@ func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute
|
|||||||
|
|
||||||
func (flow *handleRequestBlockLocatorFlow) start() error {
|
func (flow *handleRequestBlockLocatorFlow) start() error {
|
||||||
for {
|
for {
|
||||||
lowHash, highHash, limit, err := flow.receiveGetBlockLocator()
|
highHash, limit, err := flow.receiveGetBlockLocator()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("Received getBlockLocator with lowHash: %s, highHash: %s, limit: %d",
|
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)
|
||||||
lowHash, highHash, limit)
|
|
||||||
|
|
||||||
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
|
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
|
||||||
if err != nil || len(locator) == 0 {
|
if err != nil || len(locator) == 0 {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("Received error from CreateBlockLocator: %s", err)
|
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
|
||||||
}
|
}
|
||||||
return protocolerrors.Errorf(true, "couldn't build a block "+
|
return protocolerrors.Errorf(true, "couldn't build a block "+
|
||||||
"locator between blocks %s and %s", lowHash, highHash)
|
"locator between the pruning point and %s", highHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = flow.sendBlockLocator(locator)
|
err = flow.sendBlockLocator(locator)
|
||||||
@ -55,16 +54,15 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *externalapi.DomainHash,
|
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
|
||||||
highHash *externalapi.DomainHash, limit uint32, err error) {
|
|
||||||
|
|
||||||
message, err := flow.incomingRoute.Dequeue()
|
message, err := flow.incomingRoute.Dequeue()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
|
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
|
||||||
|
|
||||||
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
|
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
|
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
|
@ -10,28 +10,30 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
)
|
)
|
||||||
|
|
||||||
const ibdBatchSize = router.DefaultMaxMessages
|
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
|
||||||
|
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
|
||||||
|
const ibdBatchSize = 99
|
||||||
|
|
||||||
// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestHeaders flow.
|
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||||
type RequestIBDBlocksContext interface {
|
type RequestHeadersContext interface {
|
||||||
Domain() domain.Domain
|
Domain() domain.Domain
|
||||||
}
|
}
|
||||||
|
|
||||||
type handleRequestHeadersFlow struct {
|
type handleRequestHeadersFlow struct {
|
||||||
RequestIBDBlocksContext
|
RequestHeadersContext
|
||||||
incomingRoute, outgoingRoute *router.Route
|
incomingRoute, outgoingRoute *router.Route
|
||||||
peer *peer.Peer
|
peer *peer.Peer
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleRequestHeaders handles RequestHeaders messages
|
// HandleRequestHeaders handles RequestHeaders messages
|
||||||
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
|
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
|
||||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||||
|
|
||||||
flow := &handleRequestHeadersFlow{
|
flow := &handleRequestHeadersFlow{
|
||||||
RequestIBDBlocksContext: context,
|
RequestHeadersContext: context,
|
||||||
incomingRoute: incomingRoute,
|
incomingRoute: incomingRoute,
|
||||||
outgoingRoute: outgoingRoute,
|
outgoingRoute: outgoingRoute,
|
||||||
peer: peer,
|
peer: peer,
|
||||||
}
|
}
|
||||||
return flow.start()
|
return flow.start()
|
||||||
}
|
}
|
||||||
@ -42,7 +44,34 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||||
|
|
||||||
|
consensus := flow.Domain().Consensus()
|
||||||
|
|
||||||
|
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !lowHashInfo.HasHeader() {
|
||||||
|
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
highHashInfo, err := consensus.GetBlockInfo(highHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !highHashInfo.HasHeader() {
|
||||||
|
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !isLowSelectedAncestorOfHigh {
|
||||||
|
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
|
||||||
|
lowHash, highHash)
|
||||||
|
}
|
||||||
|
|
||||||
for !lowHash.Equal(highHash) {
|
for !lowHash.Equal(highHash) {
|
||||||
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
||||||
@ -51,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
// in order to avoid locking the consensus for too long
|
// in order to avoid locking the consensus for too long
|
||||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
||||||
const maxBlocks = 1 << 10
|
const maxBlocks = 1 << 10
|
||||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -59,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
|||||||
|
|
||||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||||
for i, blockHash := range blockHashes {
|
for i, blockHash := range blockHashes {
|
||||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
blockHeader, err := consensus.GetBlockHeader(blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
@ -0,0 +1,140 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
|
||||||
|
type HandleRequestPruningPointUTXOSetContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleRequestPruningPointUTXOSetFlow struct {
|
||||||
|
HandleRequestPruningPointUTXOSetContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
|
||||||
|
// the pruning point UTXO set and block body.
|
||||||
|
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
|
||||||
|
outgoingRoute *router.Route) error {
|
||||||
|
|
||||||
|
flow := &handleRequestPruningPointUTXOSetFlow{
|
||||||
|
HandleRequestPruningPointUTXOSetContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
}
|
||||||
|
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
|
||||||
|
for {
|
||||||
|
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
|
||||||
|
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
||||||
|
|
||||||
|
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
|
||||||
|
defer onEnd()
|
||||||
|
|
||||||
|
log.Debugf("Got request for pruning point UTXO set")
|
||||||
|
|
||||||
|
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
|
||||||
|
*appmessage.MsgRequestPruningPointUTXOSet, error) {
|
||||||
|
|
||||||
|
message, err := flow.incomingRoute.Dequeue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
|
||||||
|
if !ok {
|
||||||
|
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
||||||
|
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
|
||||||
|
}
|
||||||
|
return msgRequestPruningPointUTXOSet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
|
||||||
|
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
|
||||||
|
|
||||||
|
// Send the UTXO set in `step`-sized chunks
|
||||||
|
const step = 1000
|
||||||
|
var fromOutpoint *externalapi.DomainOutpoint
|
||||||
|
chunksSent := 0
|
||||||
|
for {
|
||||||
|
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
|
||||||
|
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
|
||||||
|
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Retrieved %d UTXOs for pruning block %s",
|
||||||
|
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||||
|
|
||||||
|
outpointAndUTXOEntryPairs :=
|
||||||
|
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
|
||||||
|
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
finished := len(pruningPointUTXOs) < step
|
||||||
|
if finished && chunksSent%ibdBatchSize != 0 {
|
||||||
|
log.Debugf("Finished sending UTXOs for pruning block %s",
|
||||||
|
msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||||
|
|
||||||
|
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(pruningPointUTXOs) > 0 {
|
||||||
|
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
|
||||||
|
}
|
||||||
|
chunksSent++
|
||||||
|
|
||||||
|
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
|
||||||
|
if chunksSent%ibdBatchSize == 0 {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
|
||||||
|
if !ok {
|
||||||
|
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
|
||||||
|
return protocolerrors.Errorf(false, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
|
||||||
|
}
|
||||||
|
|
||||||
|
if finished {
|
||||||
|
log.Debugf("Finished sending UTXOs for pruning block %s",
|
||||||
|
msgRequestPruningPointUTXOSet.PruningPointHash)
|
||||||
|
|
||||||
|
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
751
app/protocol/flows/v5/blockrelay/ibd.go
Normal file
751
app/protocol/flows/v5/blockrelay/ibd.go
Normal file
@ -0,0 +1,751 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||||
|
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IBDContext is the interface for the context needed for the HandleIBD flow.
|
||||||
|
type IBDContext interface {
|
||||||
|
Domain() domain.Domain
|
||||||
|
Config() *config.Config
|
||||||
|
OnNewBlock(block *externalapi.DomainBlock) error
|
||||||
|
OnNewBlockTemplate() error
|
||||||
|
OnPruningPointUTXOSetOverride() error
|
||||||
|
IsIBDRunning() bool
|
||||||
|
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||||
|
UnsetIBDRunning()
|
||||||
|
IsRecoverableError(err error) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type handleIBDFlow struct {
|
||||||
|
IBDContext
|
||||||
|
incomingRoute, outgoingRoute *router.Route
|
||||||
|
peer *peerpkg.Peer
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleIBD handles IBD
|
||||||
|
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||||
|
peer *peerpkg.Peer) error {
|
||||||
|
|
||||||
|
flow := &handleIBDFlow{
|
||||||
|
IBDContext: context,
|
||||||
|
incomingRoute: incomingRoute,
|
||||||
|
outgoingRoute: outgoingRoute,
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
return flow.start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) start() error {
|
||||||
|
for {
|
||||||
|
// Wait for IBD requests triggered by other flows
|
||||||
|
block, ok := <-flow.peer.IBDRequestChannel()
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err := flow.runIBDIfNotRunning(block)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
|
||||||
|
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
|
||||||
|
if !wasIBDNotRunning {
|
||||||
|
log.Debugf("IBD is already running")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
isFinishedSuccessfully := false
|
||||||
|
var err error
|
||||||
|
defer func() {
|
||||||
|
flow.UnsetIBDRunning()
|
||||||
|
flow.logIBDFinished(isFinishedSuccessfully, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
relayBlockHash := consensushashing.BlockHash(block)
|
||||||
|
|
||||||
|
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||||
|
log.Infof("Syncing blocks up to %s", relayBlockHash)
|
||||||
|
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||||
|
|
||||||
|
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
|
||||||
|
block, highestKnownSyncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !shouldSync {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if shouldDownloadHeadersProof {
|
||||||
|
log.Infof("Starting IBD with headers proof")
|
||||||
|
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
|
||||||
|
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGenesisVirtualSelectedParent {
|
||||||
|
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
|
||||||
|
"to the recent pruning point before normal operation can resume.", relayBlockHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.syncPruningPointFutureHeaders(
|
||||||
|
flow.Domain().Consensus(),
|
||||||
|
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We start by syncing missing bodies over the syncer selected chain
|
||||||
|
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Relay block might be in the anticone of syncer selected tip, thus
|
||||||
|
// check his chain for missing bodies as well.
|
||||||
|
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
|
||||||
|
// is in syncer virtual mergeset which has bounded size.
|
||||||
|
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||||
|
err = flow.syncMissingBlockBodies(relayBlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
|
||||||
|
isFinishedSuccessfully = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
|
||||||
|
/*
|
||||||
|
Algorithm:
|
||||||
|
Request full selected chain block locator from syncer
|
||||||
|
Find the highest block which we know
|
||||||
|
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Empty hashes indicate that the full chain is queried
|
||||||
|
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) == 0 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||||
|
"to contain at least one element")
|
||||||
|
}
|
||||||
|
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||||
|
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||||
|
chainNegotiationRestartCounter := 0
|
||||||
|
chainNegotiationZoomCounts := 0
|
||||||
|
initialLocatorLen := len(locatorHashes)
|
||||||
|
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||||
|
for _, syncerChainHash := range locatorHashes {
|
||||||
|
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if info.Exists {
|
||||||
|
if info.BlockStatus == externalapi.StatusInvalid {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're only interested in syncer chain blocks that have our pruning
|
||||||
|
// point in their selected chain. Otherwise, it means one of the following:
|
||||||
|
// 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality
|
||||||
|
// (hence we can ignore it unless merged by others).
|
||||||
|
// 2) syncerChainHash is actually in the past of our pruning point so there's no
|
||||||
|
// point in syncing from it.
|
||||||
|
if err == nil && isPruningPointOnSyncerChain {
|
||||||
|
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lowestUnknownSyncerChainHash = syncerChainHash
|
||||||
|
}
|
||||||
|
// No unknown blocks, break. Note this can only happen in the first iteration
|
||||||
|
if lowestUnknownSyncerChainHash == nil {
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// No shared block, break
|
||||||
|
if currentHighestKnownSyncerChainHash == nil {
|
||||||
|
highestKnownSyncerChainHash = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// No point in zooming further
|
||||||
|
if len(locatorHashes) == 1 {
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Zoom in
|
||||||
|
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||||
|
lowestUnknownSyncerChainHash,
|
||||||
|
currentHighestKnownSyncerChainHash, time.Second*10)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) > 0 {
|
||||||
|
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||||
|
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||||
|
"hashes to match the locator bounds")
|
||||||
|
}
|
||||||
|
|
||||||
|
chainNegotiationZoomCounts++
|
||||||
|
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
|
||||||
|
if len(locatorHashes) == 2 {
|
||||||
|
// We found our search target
|
||||||
|
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if chainNegotiationZoomCounts > initialLocatorLen*2 {
|
||||||
|
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
|
||||||
|
// expected to decrease in size at least every two iterations
|
||||||
|
return nil, nil, protocolerrors.Errorf(true,
|
||||||
|
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
|
||||||
|
chainNegotiationZoomCounts, initialLocatorLen)
|
||||||
|
}
|
||||||
|
|
||||||
|
} else { // Empty locator signals a restart due to chain changes
|
||||||
|
chainNegotiationZoomCounts = 0
|
||||||
|
chainNegotiationRestartCounter++
|
||||||
|
if chainNegotiationRestartCounter > 32 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(false,
|
||||||
|
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||||
|
}
|
||||||
|
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
|
||||||
|
|
||||||
|
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||||
|
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
|
||||||
|
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(locatorHashes) == 0 {
|
||||||
|
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||||
|
"to contain at least one element")
|
||||||
|
}
|
||||||
|
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||||
|
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||||
|
|
||||||
|
initialLocatorLen = len(locatorHashes)
|
||||||
|
// Reset syncer's header selected tip
|
||||||
|
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Found highest known syncer chain block %s from peer %s",
|
||||||
|
highestKnownSyncerChainHash, flow.peer)
|
||||||
|
|
||||||
|
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||||
|
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
|
||||||
|
successString := "successfully"
|
||||||
|
if !isFinishedSuccessfully {
|
||||||
|
if err != nil {
|
||||||
|
successString = fmt.Sprintf("(interrupted: %s)", err)
|
||||||
|
} else {
|
||||||
|
successString = fmt.Sprintf("(interrupted)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||||
|
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||||
|
|
||||||
|
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||||
|
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch message := message.(type) {
|
||||||
|
case *appmessage.MsgIBDChainBlockLocator:
|
||||||
|
if len(message.BlockLocatorHashes) > 64 {
|
||||||
|
return nil, protocolerrors.Errorf(true,
|
||||||
|
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||||
|
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||||
|
len(message.BlockLocatorHashes))
|
||||||
|
}
|
||||||
|
return message.BlockLocatorHashes, nil
|
||||||
|
default:
|
||||||
|
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncPruningPointFutureHeaders downloads all missing headers between
// highestKnownSyncerChainHash and the syncer's header selected tip into the
// given consensus, then syncs the past of the triggering relay block via
// syncMissingRelayPast. highBlockDAAScoreHint is used only for progress
// reporting (it is a hint, not a guarantee).
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
	syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
	highBlockDAAScoreHint uint64) error {

	log.Infof("Downloading headers from %s", flow.peer)

	if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
		// No need to get syncer selected tip headers, so sync relay past and return
		return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
	}

	err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
	if err != nil {
		return err
	}

	highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	// Producer goroutine: receives header chunks from the peer and requests
	// the next chunk while the consumer loop below validates the current one.
	spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				// Closing the channel signals the consumer loop that all headers arrived
				close(blockHeadersMessageChan)
				return
			}
			if len(blockHeadersMessage.BlockHeaders) == 0 {
				// The syncer should have sent a done message if the search completed, and not an empty list
				errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			// Pipeline: ask for the next chunk before the current one is consumed
			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	// Consumer loop: validate and insert each received chunk of headers.
	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				// Channel closed: header download is complete; proceed to the relay block's past
				return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}

			lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
			progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
		case err := <-errChan:
			return err
		}
	}
}
|
||||||
|
|
||||||
|
// syncMissingRelayPast makes sure the relay block that triggered this IBD is
// known (at least as a header) after the main header download. If it is
// still missing, it requests the anticone of the syncer's header selected
// tip — which must arrive as exactly one header chunk followed by a done
// message — and processes those headers. If relayBlockHash remains unknown
// afterwards, the peer is reported as misbehaving.
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
	// Finished downloading syncer selected tip blocks,
	// check if we already have the triggering relayBlockHash
	relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		// Send a special header request for the selected tip anticone. This is expected to
		// be a small set, as it is bounded to the size of virtual's mergeset.
		err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
		if err != nil {
			return err
		}
		anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
		if err != nil {
			return err
		}
		if anticoneDone {
			// A done signal before any chunk contradicts relayBlockHash being missing
			return protocolerrors.Errorf(true,
				"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		// The second receive must be the done signal — the anticone is bounded
		// by the virtual mergeset size and so must fit in a single chunk
		_, anticoneDone, err = flow.receiveHeaders()
		if err != nil {
			return err
		}
		if !anticoneDone {
			return protocolerrors.Errorf(true,
				"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
				relayBlockHash, syncerHeaderSelectedTipHash)
		}
		for _, header := range anticoneHeadersMessage.BlockHeaders {
			err = flow.processHeader(consensus, header)
			if err != nil {
				return err
			}
		}
	}

	// If the relayBlockHash has still not been received, the peer is misbehaving
	relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}
	if !relayBlockInfo.Exists {
		return protocolerrors.Errorf(true, "did not receive "+
			"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) sendRequestAnticone(
|
||||||
|
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
|
||||||
|
|
||||||
|
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||||
|
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) sendRequestHeaders(
|
||||||
|
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
|
||||||
|
|
||||||
|
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||||
|
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
switch message := message.(type) {
|
||||||
|
case *appmessage.BlockHeadersMessage:
|
||||||
|
return message, false, nil
|
||||||
|
case *appmessage.MsgDoneHeaders:
|
||||||
|
return nil, true, nil
|
||||||
|
default:
|
||||||
|
return nil, false,
|
||||||
|
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s or %s, got: %s",
|
||||||
|
appmessage.CmdBlockHeaders,
|
||||||
|
appmessage.CmdDoneHeaders,
|
||||||
|
message.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
|
||||||
|
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
||||||
|
block := &externalapi.DomainBlock{
|
||||||
|
Header: header,
|
||||||
|
Transactions: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
blockHash := consensushashing.BlockHash(block)
|
||||||
|
blockInfo, err := consensus.GetBlockInfo(blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if blockInfo.Exists {
|
||||||
|
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err = consensus.ValidateAndInsertBlock(block, false)
|
||||||
|
if err != nil {
|
||||||
|
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
|
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||||
|
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
|
||||||
|
} else {
|
||||||
|
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
||||||
|
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
|
||||||
|
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
|
||||||
|
|
||||||
|
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
|
||||||
|
|
||||||
|
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
|
||||||
|
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
|
||||||
|
"tip is smaller than the current selected tip")
|
||||||
|
}
|
||||||
|
|
||||||
|
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
|
||||||
|
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
|
||||||
|
return protocolerrors.Errorf(false, "difference between the timestamps of "+
|
||||||
|
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// receiveAndInsertPruningPointUTXOSet streams the pruning point UTXO set from
// the peer in chunks and appends each chunk into the given consensus,
// requesting the next batch every ibdBatchSize chunks. It returns true on a
// complete transfer, and false (without error) if the peer reports that the
// given pruning point is no longer its pruning point.
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			// Request the next batch only after a full batch was processed,
			// applying back-pressure on the sender
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			// The peer's pruning point moved mid-transfer; the caller decides how to proceed
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}
|
||||||
|
|
||||||
|
// syncMissingBlockBodies downloads the bodies of all header-only blocks up to
// highHash, in batches of ibdBatchSize, validates and inserts each block, and
// finally resolves the virtual — unless the node is nearly synced, in which
// case the virtual is updated per-block instead.
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	// The first and last hashes bound the DAA score range for progress reporting
	// (presumably GetMissingBlockBodyHashes returns hashes in topological order — TODO confirm)
	lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
	if err != nil {
		return err
	}
	highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
	highestProcessedDAAScore := lowBlockHeader.DAAScore()

	// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
	updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
	if err != nil {
		return err
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		// Take the next batch (the final batch may be shorter than ibdBatchSize)
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		// Blocks are expected back in exactly the order they were requested
		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block)
			if err != nil {
				return err
			}

			highestProcessedDAAScore = block.Header.DAAScore()
		}

		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
	}

	// We need to resolve virtual only if it wasn't updated while syncing block bodies
	if !updateVirtual {
		err := flow.resolveVirtual(highestProcessedDAAScore)
		if err != nil {
			return err
		}
	}

	return flow.OnNewBlockTemplate()
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||||
|
if len(block.Transactions) == 0 {
|
||||||
|
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||||
|
consensushashing.BlockHash(block))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
|
||||||
|
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
|
||||||
|
var percents int
|
||||||
|
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
|
||||||
|
percents = 100
|
||||||
|
} else {
|
||||||
|
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
|
||||||
|
}
|
||||||
|
if percents < 0 {
|
||||||
|
percents = 0
|
||||||
|
} else if percents > 100 {
|
||||||
|
percents = 100
|
||||||
|
}
|
||||||
|
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Resolved virtual")
|
||||||
|
return nil
|
||||||
|
}
|
45
app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go
Normal file
45
app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
// ibdProgressReporter logs IBD progress as a percentage of the DAA score
// range being synced, emitting a line only when the integer percentage
// advances.
type ibdProgressReporter struct {
	lowDAAScore                 uint64 // DAA score at which the sync started
	highDAAScore                uint64 // estimated target DAA score; may be adjusted upwards during the sync
	objectName                  string // human-readable name of the synced objects (e.g. "block headers")
	totalDAAScoreDifference     uint64 // highDAAScore - lowDAAScore; denominator for percentage computation
	lastReportedProgressPercent int    // last percentage already logged, to avoid duplicate reports
	processed                   int    // total number of objects processed so far
}
|
||||||
|
|
||||||
|
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
|
||||||
|
if highDAAScore <= lowDAAScore {
|
||||||
|
// Avoid a zero or negative diff
|
||||||
|
highDAAScore = lowDAAScore + 1
|
||||||
|
}
|
||||||
|
return &ibdProgressReporter{
|
||||||
|
lowDAAScore: lowDAAScore,
|
||||||
|
highDAAScore: highDAAScore,
|
||||||
|
objectName: objectName,
|
||||||
|
totalDAAScoreDifference: highDAAScore - lowDAAScore,
|
||||||
|
lastReportedProgressPercent: 0,
|
||||||
|
processed: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
|
||||||
|
ipr.processed += processedDelta
|
||||||
|
|
||||||
|
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
|
||||||
|
if highestProcessedDAAScore > ipr.highDAAScore {
|
||||||
|
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
|
||||||
|
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
|
||||||
|
}
|
||||||
|
relativeDAAScore := uint64(0)
|
||||||
|
if highestProcessedDAAScore > ipr.lowDAAScore {
|
||||||
|
// Avoid a negative diff
|
||||||
|
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
|
||||||
|
}
|
||||||
|
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
|
||||||
|
if progressPercent > ipr.lastReportedProgressPercent {
|
||||||
|
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
|
||||||
|
ipr.lastReportedProgressPercent = progressPercent
|
||||||
|
}
|
||||||
|
}
|
444
app/protocol/flows/v5/blockrelay/ibd_with_headers_proof.go
Normal file
444
app/protocol/flows/v5/blockrelay/ibd_with_headers_proof.go
Normal file
@ -0,0 +1,444 @@
|
|||||||
|
package blockrelay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/kaspanet/kaspad/app/appmessage"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||||
|
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||||
|
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ibdWithHeadersProof runs the headers-proof IBD path: it initializes a
// staging consensus (without genesis), downloads the pruning point proof,
// headers and pruning point UTXO set into it, and on success commits the
// staging consensus in place of the current one and fires the
// pruning-point-UTXO-set-override notification. On a recoverable download
// error the staging consensus is deleted and the original error returned.
func (flow *handleIBDFlow) ibdWithHeadersProof(
	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
	err := flow.Domain().InitStagingConsensusWithoutGenesis()
	if err != nil {
		return err
	}

	err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
	if err != nil {
		if !flow.IsRecoverableError(err) {
			return err
		}

		// Clean up the half-built staging consensus before propagating the error
		log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
		deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
		if deleteStagingConsensusErr != nil {
			return deleteStagingConsensusErr
		}

		return err
	}

	log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
		"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
	err = flow.Domain().CommitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.OnPruningPointUTXOSetOverride()
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// shouldSyncAndShouldDownloadHeadersProof decides whether to sync from this
// peer at all (shouldSync) and whether that sync must go through the
// headers-proof path (shouldDownload).
//
// If the highest known syncer chain block has a body and has our pruning
// point on its selected chain, a regular sync suffices. Otherwise a headers
// proof is required — but only if the relay block has enough blue work and
// blue score over our selected tip; failing that, and with no shared chain
// block at all, syncing is skipped to avoid a finality conflict.
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
	relayBlock *externalapi.DomainBlock,
	highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {

	// Both flags default to false, covering the highestKnownSyncerChainHash == nil case
	var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
	if highestKnownSyncerChainHash != nil {
		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
		if err != nil {
			return false, false, err
		}

		highestSharedBlockFound = blockInfo.HasBody()
		pruningPoint, err := flow.Domain().Consensus().PruningPoint()
		if err != nil {
			return false, false, err
		}

		isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
			pruningPoint, highestKnownSyncerChainHash)
		if err != nil {
			return false, false, err
		}
	}
	// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
	// we might have here info which is relevant to finality conflict decisions. This should be taken into
	// account when we improve this aspect.
	if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
		hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
		if err != nil {
			return false, false, err
		}

		if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
			// The syncer's chain is heavy enough to justify a headers-proof sync
			return true, true, nil
		}

		if highestKnownSyncerChainHash == nil {
			log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
			return false, false, nil
		}

		// A shared chain block exists, so a regular (non-proof) sync is possible
		return false, true, nil
	}

	return false, true, nil
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
|
||||||
|
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||||
|
log.Infof("Downloading the pruning point proof from %s", flow.peer)
|
||||||
|
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
|
||||||
|
if !ok {
|
||||||
|
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
|
||||||
|
}
|
||||||
|
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
|
||||||
|
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
|
||||||
|
if err != nil {
|
||||||
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
|
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// downloadHeadersAndPruningUTXOSet performs the headers-proof variant of IBD against the
// staging consensus: it downloads and validates the pruning point proof, the past pruning
// points and the pruning point anticone, then all headers up to the relay block, and
// finally the pruning point UTXO set.
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
	syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
	highBlockDAAScore uint64) error {

	proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
	if err != nil {
		return err
	}

	err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
	if err != nil {
		return err
	}

	// TODO: Remove this condition once there's more proper way to check finality violation
	// in the headers proof.
	// A proof whose pruning point is genesis means the syncer's chain split off before our
	// finality window — treat it as a finality violation and ban.
	if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
		return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
	}

	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
		syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
	if err != nil {
		return err
	}

	log.Infof("Headers downloaded from peer %s", flow.peer)

	relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
	if err != nil {
		return err
	}

	// The header download must have covered the block that triggered this IBD;
	// a syncer that omitted it is misbehaving.
	if !relayBlockInfo.Exists {
		return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
	}

	err = flow.validatePruningPointFutureHeaderTimestamps()
	if err != nil {
		return err
	}

	log.Debugf("Syncing the current pruning point UTXO set")
	syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
	if err != nil {
		return err
	}
	// A false result with a nil error is a deliberate non-banning abort (e.g. the
	// suggested pruning point violates finality during the UTXO-set import).
	if !syncedPruningPointUTXOSetSuccessfully {
		log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
		return nil
	}
	log.Debugf("Finished syncing the current pruning point UTXO set")
	return nil
}
|
||||||
|
|
||||||
|
// syncPruningPointsAndPruningPointAnticone downloads the list of past pruning points and
// the pruning point's anticone (as blocks with trusted data) from the syncing peer and
// inserts them into the staging consensus. The pruning point itself is required to be the
// first trusted-data block received.
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
	log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
	if err != nil {
		return err
	}

	// The peer answers with the pruning points list first; validate and import it before
	// consuming any trusted-data messages.
	err = flow.validateAndInsertPruningPoints(proofPruningPoint)
	if err != nil {
		return err
	}

	// A single MsgTrustedData carries the shared DAA-window and GHOSTDAG data that all
	// subsequent trusted-data blocks index into.
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}

	msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
	if !ok {
		return protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
	}

	// The first trusted-data block must be the pruning point itself.
	pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
	if err != nil {
		return err
	}

	if done {
		return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
	}

	if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
	}

	err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
	if err != nil {
		return err
	}

	// Receive the rest of the anticone until the peer signals `done`, requesting the next
	// batch each time a full ibdBatchSize worth of blocks has been consumed.
	i := 0
	for ; ; i++ {
		blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
		if err != nil {
			return err
		}

		if done {
			break
		}

		err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
		if err != nil {
			return err
		}

		// We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
		// the pruning point outside the loop so we use i+2 instead of i+1.
		if (i+2)%ibdBatchSize == 0 {
			log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
			err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
			if err != nil {
				return err
			}
		}
	}

	// i+1 counts the pruning point downloaded before the loop plus the i anticone blocks.
	log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
	return nil
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) processBlockWithTrustedData(
|
||||||
|
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
|
||||||
|
|
||||||
|
blockWithTrustedData := &externalapi.BlockWithTrustedData{
|
||||||
|
Block: appmessage.MsgBlockToDomainBlock(block.Block),
|
||||||
|
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
|
||||||
|
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, index := range block.DAAWindowIndices {
|
||||||
|
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, index := range block.GHOSTDAGDataIndices {
|
||||||
|
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
||||||
|
}
|
||||||
|
|
||||||
|
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
|
||||||
|
if err != nil {
|
||||||
|
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||||
|
return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch downCastedMessage := message.(type) {
|
||||||
|
case *appmessage.MsgBlockWithTrustedDataV4:
|
||||||
|
return downCastedMessage, false, nil
|
||||||
|
case *appmessage.MsgDoneBlocksWithTrustedData:
|
||||||
|
return nil, true, nil
|
||||||
|
default:
|
||||||
|
return nil, false,
|
||||||
|
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s or %s, got: %s",
|
||||||
|
(&appmessage.MsgBlockWithTrustedData{}).Command(),
|
||||||
|
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
|
||||||
|
downCastedMessage.Command())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
|
||||||
|
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
|
||||||
|
if !ok {
|
||||||
|
return nil,
|
||||||
|
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||||
|
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
|
||||||
|
}
|
||||||
|
|
||||||
|
return msgPruningPoints, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
|
||||||
|
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if currentPruningPoint.Equal(proofPruningPoint) {
|
||||||
|
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
|
||||||
|
}
|
||||||
|
|
||||||
|
pruningPoints, err := flow.receivePruningPoints()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
|
||||||
|
for i, header := range pruningPoints.Headers {
|
||||||
|
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
|
||||||
|
}
|
||||||
|
|
||||||
|
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if arePruningPointsViolatingFinality {
|
||||||
|
// TODO: Find a better way to deal with finality conflicts.
|
||||||
|
return protocolerrors.Errorf(false, "pruning points are violating finality")
|
||||||
|
}
|
||||||
|
|
||||||
|
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
|
||||||
|
if !lastPruningPoint.Equal(proofPruningPoint) {
|
||||||
|
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
|
||||||
|
"point in the list")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
|
||||||
|
pruningPoint *externalapi.DomainHash) (bool, error) {
|
||||||
|
|
||||||
|
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
|
||||||
|
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isValid {
|
||||||
|
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Fetching the pruning point UTXO set")
|
||||||
|
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isSuccessful {
|
||||||
|
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Fetched the new pruning point UTXO set")
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
|
||||||
|
defer func() {
|
||||||
|
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if !receivedAll {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Find a better way to deal with finality conflicts.
|
||||||
|
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user