Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 19:22:53 +00:00
Compare commits: v0.8.6-dev...patch3 (331 commits)
.github/workflows/SetPageFileSize.ps1 (vendored, 4 changes)
@@ -11,8 +11,8 @@
 #>

 param(
-    [System.UInt64] $MinimumSize = 8gb ,
-    [System.UInt64] $MaximumSize = 8gb ,
+    [System.UInt64] $MinimumSize = 16gb ,
+    [System.UInt64] $MaximumSize = 16gb ,
     [System.String] $DiskRoot = "D:"
 )
.github/workflows/deploy.yaml (vendored, new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
name: Build and upload assets
|
||||
on:
|
||||
release:
|
||||
types: [ published ]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ ubuntu-latest, windows-latest, macos-latest ]
|
||||
name: Building, ${{ matrix.os }}
|
||||
steps:
|
||||
- name: Fix CRLF on Windows
|
||||
if: runner.os == 'Windows'
|
||||
run: git config --global core.autocrlf false
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# Increase the pagefile size on Windows to avoid running out of memory
|
||||
- name: Increase pagefile size on Windows
|
||||
if: runner.os == 'Windows'
|
||||
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
||||
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
- name: Build on Linux
|
||||
if: runner.os == 'Linux'
|
||||
# `-extldflags=-static` - means static link everything,
|
||||
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
|
||||
# `-s -w` strips the binary to produce smaller size binaries
|
||||
run: |
|
||||
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
|
||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
|
||||
zip -r "${archive}" ./bin/*
|
||||
echo "archive=${archive}" >> $GITHUB_ENV
|
||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build on Windows
|
||||
if: runner.os == 'Windows'
|
||||
shell: bash
|
||||
run: |
|
||||
go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
|
||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
|
||||
powershell "Compress-Archive bin/* \"${archive}\""
|
||||
echo "archive=${archive}" >> $GITHUB_ENV
|
||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build on MacOS
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
|
||||
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
|
||||
zip -r "${archive}" ./bin/*
|
||||
echo "archive=${archive}" >> $GITHUB_ENV
|
||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||
|
||||
|
||||
- name: Upload release asset
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ github.event.release.upload_url }}
|
||||
asset_path: "./${{ env.archive }}"
|
||||
asset_name: "${{ env.asset_name }}"
|
||||
asset_content_type: application/zip
|
||||
.github/workflows/go.yml (vendored, deleted, 70 lines)
@@ -1,70 +0,0 @@
|
||||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
# edited - "title, body, or the base branch of the PR is modified"
|
||||
# synchronize - "commit(s) pushed to the pull request"
|
||||
types: [opened, synchronize, edited, reopened]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-16.04, macos-10.15 ]
|
||||
name: Testing on ${{ matrix.os }}
|
||||
steps:
|
||||
|
||||
- name: Fix windows CRLF
|
||||
run: git config --global core.autocrlf false
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# We need to increase the page size because the tests run out of memory on github CI windows.
|
||||
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
|
||||
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
|
||||
- name: Increase page size on windows
|
||||
if: runner.os == 'Windows'
|
||||
shell: powershell
|
||||
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
|
||||
|
||||
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.14
|
||||
|
||||
|
||||
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
||||
- name: Go Cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
|
||||
- name: Test
|
||||
shell: bash
|
||||
run: ./build_and_test.sh
|
||||
|
||||
coverage:
|
||||
runs-on: ubuntu-20.04
|
||||
name: Produce code coverage
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.14
|
||||
|
||||
- name: Create coverage file
|
||||
# Because of https://github.com/golang/go/issues/27333 this seems to "fail" even though nothing is wrong, so ignore the failure
|
||||
run: go test -json -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./... || true
|
||||
|
||||
- name: Upload coverage file
|
||||
run: bash <(curl -s https://codecov.io/bash)
|
||||
.github/workflows/race.yaml (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
name: Race
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
race_test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
branch: [ master, latest ]
|
||||
name: Race detection on ${{ matrix.branch }}
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
- name: Set scheduled branch name
|
||||
shell: bash
|
||||
if: github.event_name == 'schedule'
|
||||
run: |
|
||||
if [ "${{ matrix.branch }}" == "master" ]; then
|
||||
echo "run_on=master" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ "${{ matrix.branch }}" == "latest" ]; then
|
||||
branch=$(git branch -r | grep 'v\([0-9]\+\.\)\([0-9]\+\.\)\([0-9]\+\)-dev' | sort -Vr | head -1 | xargs)
|
||||
echo "run_on=${branch}" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Set manual branch name
|
||||
shell: bash
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
run: echo "run_on=${{ github.ref }}" >> $GITHUB_ENV
|
||||
|
||||
- name: Test with race detector
|
||||
shell: bash
|
||||
run: |
|
||||
git checkout "${{ env.run_on }}"
|
||||
git status
|
||||
go test -timeout 20m -race ./...
|
||||
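The nightly job above simply checks out the selected branch and runs `go test -race` over the whole tree. For context, a minimal sketch (not kaspad code) of the kind of unsynchronized access the race detector reports:

```go
package main

import "fmt"

func main() {
	counter := 0
	done := make(chan struct{})
	go func() {
		counter++ // write from a second goroutine
		close(done)
	}()
	counter++ // concurrent, unsynchronized write; the race detector flags this pattern
	<-done
	fmt.Println(counter)
}
```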
.github/workflows/tests.yaml (vendored, new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
name: Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
# edited - because the base branch can be modified
|
||||
# synchronize - update commits on PR
|
||||
types: [opened, synchronize, edited]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ ubuntu-latest, macos-latest ]
|
||||
name: Tests, ${{ matrix.os }}
|
||||
steps:
|
||||
|
||||
- name: Fix CRLF on Windows
|
||||
if: runner.os == 'Windows'
|
||||
run: git config --global core.autocrlf false
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# Increase the pagefile size on Windows to avoid running out of memory
|
||||
- name: Increase pagefile size on Windows
|
||||
if: runner.os == 'Windows'
|
||||
run: powershell -command .github\workflows\SetPageFileSize.ps1
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
|
||||
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
|
||||
- name: Go Cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
|
||||
- name: Test
|
||||
shell: bash
|
||||
run: ./build_and_test.sh -v
|
||||
|
||||
|
||||
stability-test-fast:
|
||||
runs-on: ubuntu-latest
|
||||
name: Fast stability tests, ${{ github.head_ref }}
|
||||
steps:
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install kaspad
|
||||
run: go install ./...
|
||||
|
||||
- name: Install golint
|
||||
run: go get -u golang.org/x/lint/golint
|
||||
|
||||
- name: Run fast stability tests
|
||||
working-directory: stability-tests
|
||||
run: ./install_and_test.sh
|
||||
|
||||
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
name: Produce code coverage
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
- name: Delete the stability tests from coverage
|
||||
run: rm -r stability-tests
|
||||
|
||||
- name: Create coverage file
|
||||
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
|
||||
|
||||
- name: Upload coverage file
|
||||
run: bash <(curl -s https://codecov.io/bash)
|
||||
@@ -12,8 +12,7 @@ If you want to make a big change it's better to discuss it first by opening an i

## Pull Request process

-Any pull request should be opened against the development branch of the target version. The development branch format is
-as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
+Any pull request should be opened against the development branch `dev`.

All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
submitting your PR.
README.md (13 changes)

@@ -1,16 +1,13 @@
Kaspad
====
-Warning: This is pre-alpha software. There's no guarantee anything works.
-====

[](https://choosealicense.com/licenses/isc/)
[](http://godoc.org/github.com/kaspanet/kaspad)

Kaspad is the reference full node Kaspa implementation written in Go (golang).

-This project is currently under active development and is in a pre-Alpha state.
-Some things still don't work and APIs are far from finalized. The code is provided for reference only.
+This project is currently under active development and is in Beta state.

## What is kaspa

@@ -18,7 +15,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations

## Requirements

-Go 1.14 or later.
+Go 1.16 or later.

## Installation

@@ -56,16 +53,18 @@ $ kaspad
```

## Discord
-Join our discord server using the following link: https://discord.gg/WmGhhzk
+Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2

## Issue Tracker

The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.

+Issue priorities may be seen at https://github.com/orgs/kaspanet/projects/4

## Documentation

-The documentation is a work-in-progress.
+The [documentation](https://github.com/kaspanet/docs) is a work-in-progress

## License
app/app.go (50 changes)
@@ -7,19 +7,22 @@ import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/os/signal"
|
||||
"github.com/kaspanet/kaspad/util/profiling"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/os/execenv"
|
||||
"github.com/kaspanet/kaspad/infrastructure/os/limits"
|
||||
"github.com/kaspanet/kaspad/infrastructure/os/signal"
|
||||
"github.com/kaspanet/kaspad/infrastructure/os/winservice"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
"github.com/kaspanet/kaspad/util/profiling"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
)
|
||||
|
||||
const (
|
||||
leveldbCacheSizeMiB = 256
|
||||
defaultDataDirname = "datadir2"
|
||||
)
|
||||
|
||||
var desiredLimits = &limits.DesiredLimits{
|
||||
@@ -49,6 +52,7 @@ func StartApp() error {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return err
|
||||
}
|
||||
defer logger.BackendLog.Close()
|
||||
defer panics.HandlePanic(log, "MAIN", nil)
|
||||
|
||||
app := &kaspadApp{cfg: cfg}
|
||||
@@ -83,12 +87,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
||||
if app.cfg.Profile != "" {
|
||||
profiling.Start(app.cfg.Profile, log)
|
||||
}
|
||||
|
||||
// Perform upgrades to kaspad as new versions require it.
|
||||
if err := doUpgrades(); err != nil {
|
||||
log.Error(err)
|
||||
return err
|
||||
}
|
||||
profiling.TrackHeap(app.cfg.AppDir, log)
|
||||
|
||||
// Return now if an interrupt signal was triggered.
|
||||
if signal.InterruptRequested(interrupt) {
|
||||
@@ -106,7 +105,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
||||
// Open the database
|
||||
databaseContext, err := openDB(app.cfg)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
log.Errorf("Loading database failed: %+v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -162,15 +161,9 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// doUpgrades performs upgrades to kaspad as new versions require it.
|
||||
// currently it's a placeholder we got from kaspad upstream that does nothing
|
||||
func doUpgrades() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbPath returns the path to the block database given a database type.
|
||||
func databasePath(cfg *config.Config) string {
|
||||
return filepath.Join(cfg.DataDir, "db")
|
||||
return filepath.Join(cfg.AppDir, defaultDataDirname)
|
||||
}
|
||||
|
||||
func removeDatabase(cfg *config.Config) error {
|
||||
@@ -180,6 +173,17 @@ func removeDatabase(cfg *config.Config) error {
|
||||
|
||||
func openDB(cfg *config.Config) (database.Database, error) {
|
||||
dbPath := databasePath(cfg)
|
||||
|
||||
err := checkDatabaseVersion(dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("Loading database from '%s'", dbPath)
|
||||
return ldb.NewLevelDB(dbPath)
|
||||
db, err := ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
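For orientation, a minimal sketch of the reworked openDB path: per the constants and the call above, the LevelDB constructor now takes a cache size alongside the path. The data directory used here is a placeholder; in app.go it is derived from cfg.AppDir and "datadir2".

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
)

func main() {
	const leveldbCacheSizeMiB = 256 // mirrors the constant added in app.go

	// Placeholder path for illustration only.
	db, err := ldb.NewLevelDB("/tmp/kaspad-example-datadir", leveldbCacheSizeMiB)
	if err != nil {
		fmt.Println("loading database failed:", err)
		return
	}
	fmt.Println("database ready:", db != nil)
}
```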
@@ -2,7 +2,11 @@ package appmessage
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/pkg/errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
@@ -27,13 +31,17 @@ func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
|
||||
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
|
||||
return &MsgBlockHeader{
|
||||
Version: domainBlockHeader.Version(),
|
||||
ParentHashes: domainBlockHeader.ParentHashes(),
|
||||
Parents: domainBlockHeader.Parents(),
|
||||
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
|
||||
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
|
||||
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
|
||||
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
|
||||
Bits: domainBlockHeader.Bits(),
|
||||
Nonce: domainBlockHeader.Nonce(),
|
||||
BlueScore: domainBlockHeader.BlueScore(),
|
||||
DAAScore: domainBlockHeader.DAAScore(),
|
||||
BlueWork: domainBlockHeader.BlueWork(),
|
||||
PruningPoint: domainBlockHeader.PruningPoint(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,13 +62,17 @@ func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
|
||||
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
|
||||
return blockheader.NewImmutableBlockHeader(
|
||||
blockHeader.Version,
|
||||
blockHeader.ParentHashes,
|
||||
blockHeader.Parents,
|
||||
blockHeader.HashMerkleRoot,
|
||||
blockHeader.AcceptedIDMerkleRoot,
|
||||
blockHeader.UTXOCommitment,
|
||||
blockHeader.Timestamp.UnixMilliseconds(),
|
||||
blockHeader.Bits,
|
||||
blockHeader.Nonce,
|
||||
blockHeader.DAAScore,
|
||||
blockHeader.BlueScore,
|
||||
blockHeader.BlueWork,
|
||||
blockHeader.PruningPoint,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -83,7 +95,6 @@ func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction)
|
||||
LockTime: domainTransaction.LockTime,
|
||||
SubnetworkID: domainTransaction.SubnetworkID,
|
||||
Gas: domainTransaction.Gas,
|
||||
PayloadHash: domainTransaction.PayloadHash,
|
||||
Payload: domainTransaction.Payload,
|
||||
}
|
||||
}
|
||||
@@ -100,6 +111,7 @@ func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTran
|
||||
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
|
||||
SignatureScript: domainTransactionInput.SignatureScript,
|
||||
Sequence: domainTransactionInput.Sequence,
|
||||
SigOpCount: domainTransactionInput.SigOpCount,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,7 +145,6 @@ func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
|
||||
LockTime: msgTx.LockTime,
|
||||
SubnetworkID: msgTx.SubnetworkID,
|
||||
Gas: msgTx.Gas,
|
||||
PayloadHash: msgTx.PayloadHash,
|
||||
Payload: payload,
|
||||
}
|
||||
}
|
||||
@@ -149,6 +160,7 @@ func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInpu
|
||||
return &externalapi.DomainTransactionInput{
|
||||
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
|
||||
SignatureScript: txIn.SignatureScript,
|
||||
SigOpCount: txIn.SigOpCount,
|
||||
Sequence: txIn.Sequence,
|
||||
}
|
||||
}
|
||||
@@ -164,18 +176,10 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
|
||||
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
|
||||
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
|
||||
for i, input := range rpcTransaction.Inputs {
|
||||
transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
|
||||
previousOutpoint, err := RPCOutpointToDomainOutpoint(input.PreviousOutpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactionID, err := transactionid.FromBytes(transactionIDBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
previousOutpoint := &externalapi.DomainOutpoint{
|
||||
TransactionID: *transactionID,
|
||||
Index: input.PreviousOutpoint.Index,
|
||||
}
|
||||
signatureScript, err := hex.DecodeString(input.SignatureScript)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -184,6 +188,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
||||
PreviousOutpoint: *previousOutpoint,
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: input.Sequence,
|
||||
SigOpCount: input.SigOpCount,
|
||||
}
|
||||
}
|
||||
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
|
||||
@@ -198,19 +203,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
||||
}
|
||||
}
|
||||
|
||||
subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
|
||||
subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -226,11 +219,40 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
||||
LockTime: rpcTransaction.LockTime,
|
||||
SubnetworkID: *subnetworkID,
|
||||
Gas: rpcTransaction.LockTime,
|
||||
PayloadHash: *payloadHash,
|
||||
Payload: payload,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RPCOutpointToDomainOutpoint converts RPCOutpoint to DomainOutpoint
|
||||
func RPCOutpointToDomainOutpoint(outpoint *RPCOutpoint) (*externalapi.DomainOutpoint, error) {
|
||||
transactionID, err := transactionid.FromString(outpoint.TransactionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &externalapi.DomainOutpoint{
|
||||
TransactionID: *transactionID,
|
||||
Index: outpoint.Index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RPCUTXOEntryToUTXOEntry converts RPCUTXOEntry to UTXOEntry
|
||||
func RPCUTXOEntryToUTXOEntry(entry *RPCUTXOEntry) (externalapi.UTXOEntry, error) {
|
||||
script, err := hex.DecodeString(entry.ScriptPublicKey.Script)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return utxo.NewUTXOEntry(
|
||||
entry.Amount,
|
||||
&externalapi.ScriptPublicKey{
|
||||
Script: script,
|
||||
Version: entry.ScriptPublicKey.Version,
|
||||
},
|
||||
entry.IsCoinbase,
|
||||
entry.BlockDAAScore,
|
||||
), nil
|
||||
}
|
||||
|
||||
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
|
||||
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
|
||||
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
|
||||
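A minimal sketch of the two new helpers above, RPCOutpointToDomainOutpoint and RPCUTXOEntryToUTXOEntry, in use; the wrapper function and its inputs are placeholders for illustration.

```go
package example

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// rpcUTXOToDomain converts wire-level RPC values into their domain counterparts
// before they are handed to consensus code.
func rpcUTXOToDomain(outpoint *appmessage.RPCOutpoint, entry *appmessage.RPCUTXOEntry) (
	*externalapi.DomainOutpoint, externalapi.UTXOEntry, error) {

	domainOutpoint, err := appmessage.RPCOutpointToDomainOutpoint(outpoint)
	if err != nil {
		return nil, nil, err
	}
	domainEntry, err := appmessage.RPCUTXOEntryToUTXOEntry(entry)
	if err != nil {
		return nil, nil, err
	}
	return domainOutpoint, domainEntry, nil
}
```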
@@ -245,6 +267,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
||||
PreviousOutpoint: previousOutpoint,
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: input.Sequence,
|
||||
SigOpCount: input.SigOpCount,
|
||||
}
|
||||
}
|
||||
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
|
||||
@@ -255,8 +278,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
||||
ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
|
||||
}
|
||||
}
|
||||
subnetworkID := hex.EncodeToString(transaction.SubnetworkID[:])
|
||||
payloadHash := transaction.PayloadHash.String()
|
||||
subnetworkID := transaction.SubnetworkID.String()
|
||||
payload := hex.EncodeToString(transaction.Payload)
|
||||
return &RPCTransaction{
|
||||
Version: transaction.Version,
|
||||
@@ -265,7 +287,6 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
||||
LockTime: transaction.LockTime,
|
||||
SubnetworkID: subnetworkID,
|
||||
Gas: transaction.LockTime,
|
||||
PayloadHash: payloadHash,
|
||||
Payload: payload,
|
||||
}
|
||||
}
|
||||
@@ -277,22 +298,27 @@ func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
|
||||
|
||||
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
|
||||
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
|
||||
domainOutpointAndUTXOEntryPairs[i] = &externalapi.OutpointAndUTXOEntryPair{
|
||||
Outpoint: &externalapi.DomainOutpoint{
|
||||
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
|
||||
Index: outpointAndUTXOEntryPair.Outpoint.Index,
|
||||
},
|
||||
UTXOEntry: utxo.NewUTXOEntry(
|
||||
outpointAndUTXOEntryPair.UTXOEntry.Amount,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore,
|
||||
),
|
||||
}
|
||||
domainOutpointAndUTXOEntryPairs[i] = outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(outpointAndUTXOEntryPair)
|
||||
}
|
||||
return domainOutpointAndUTXOEntryPairs
|
||||
}
|
||||
|
||||
func outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(
|
||||
outpointAndUTXOEntryPair *OutpointAndUTXOEntryPair) *externalapi.OutpointAndUTXOEntryPair {
|
||||
return &externalapi.OutpointAndUTXOEntryPair{
|
||||
Outpoint: &externalapi.DomainOutpoint{
|
||||
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
|
||||
Index: outpointAndUTXOEntryPair.Outpoint.Index,
|
||||
},
|
||||
UTXOEntry: utxo.NewUTXOEntry(
|
||||
outpointAndUTXOEntryPair.UTXOEntry.Amount,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
|
||||
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
|
||||
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
||||
@@ -309,9 +335,267 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
||||
Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount(),
|
||||
ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(),
|
||||
IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(),
|
||||
BlockBlueScore: outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore(),
|
||||
BlockDAAScore: outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore(),
|
||||
},
|
||||
}
|
||||
}
|
||||
return domainOutpointAndUTXOEntryPairs
|
||||
}
|
||||
|
||||
// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
|
||||
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
|
||||
parents := make([]*RPCBlockLevelParents, len(block.Header.Parents()))
|
||||
for i, blockLevelParents := range block.Header.Parents() {
|
||||
parents[i] = &RPCBlockLevelParents{
|
||||
ParentHashes: hashes.ToStrings(blockLevelParents),
|
||||
}
|
||||
}
|
||||
header := &RPCBlockHeader{
|
||||
Version: uint32(block.Header.Version()),
|
||||
Parents: parents,
|
||||
HashMerkleRoot: block.Header.HashMerkleRoot().String(),
|
||||
AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
|
||||
UTXOCommitment: block.Header.UTXOCommitment().String(),
|
||||
Timestamp: block.Header.TimeInMilliseconds(),
|
||||
Bits: block.Header.Bits(),
|
||||
Nonce: block.Header.Nonce(),
|
||||
DAAScore: block.Header.DAAScore(),
|
||||
BlueScore: block.Header.BlueScore(),
|
||||
BlueWork: block.Header.BlueWork().Text(16),
|
||||
PruningPoint: block.Header.PruningPoint().String(),
|
||||
}
|
||||
transactions := make([]*RPCTransaction, len(block.Transactions))
|
||||
for i, transaction := range block.Transactions {
|
||||
transactions[i] = DomainTransactionToRPCTransaction(transaction)
|
||||
}
|
||||
return &RPCBlock{
|
||||
Header: header,
|
||||
Transactions: transactions,
|
||||
}
|
||||
}
|
||||
|
||||
// RPCBlockToDomainBlock converts `block` into a DomainBlock
|
||||
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
||||
parents := make([]externalapi.BlockLevelParents, len(block.Header.Parents))
|
||||
for i, blockLevelParents := range block.Header.Parents {
|
||||
parents[i] = make(externalapi.BlockLevelParents, len(blockLevelParents.ParentHashes))
|
||||
for j, parentHash := range blockLevelParents.ParentHashes {
|
||||
var err error
|
||||
parents[i][j], err = externalapi.NewDomainHashFromString(parentHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
acceptedIDMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.AcceptedIDMerkleRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoCommitment, err := externalapi.NewDomainHashFromString(block.Header.UTXOCommitment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blueWork, success := new(big.Int).SetString(block.Header.BlueWork, 16)
|
||||
if !success {
|
||||
return nil, errors.Errorf("failed to parse blue work: %s", block.Header.BlueWork)
|
||||
}
|
||||
pruningPoint, err := externalapi.NewDomainHashFromString(block.Header.PruningPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header := blockheader.NewImmutableBlockHeader(
|
||||
uint16(block.Header.Version),
|
||||
parents,
|
||||
hashMerkleRoot,
|
||||
acceptedIDMerkleRoot,
|
||||
utxoCommitment,
|
||||
block.Header.Timestamp,
|
||||
block.Header.Bits,
|
||||
block.Header.Nonce,
|
||||
block.Header.DAAScore,
|
||||
block.Header.BlueScore,
|
||||
blueWork,
|
||||
pruningPoint)
|
||||
transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
|
||||
for i, transaction := range block.Transactions {
|
||||
domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions[i] = domainTransaction
|
||||
}
|
||||
return &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: transactions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData
|
||||
func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData {
|
||||
daaWindow := make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow))
|
||||
for i, daaBlock := range block.DAAWindow {
|
||||
daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{
|
||||
Header: BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header),
|
||||
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
|
||||
for i, datum := range block.GHOSTDAGData {
|
||||
ghostdagData[i] = &externalapi.BlockGHOSTDAGDataHashPair{
|
||||
Hash: datum.Hash,
|
||||
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
return &externalapi.BlockWithTrustedData{
|
||||
Block: MsgBlockToDomainBlock(block.Block),
|
||||
DAAWindow: daaWindow,
|
||||
GHOSTDAGData: ghostdagData,
|
||||
}
|
||||
}
|
||||
|
||||
// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader
|
||||
func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader {
|
||||
return &externalapi.TrustedDataDataDAAHeader{
|
||||
Header: BlockHeaderToDomainBlockHeader(daaBlock.Header),
|
||||
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair
|
||||
func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair {
|
||||
return &externalapi.BlockGHOSTDAGDataHashPair{
|
||||
Hash: datum.Hash,
|
||||
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData {
|
||||
bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes))
|
||||
for _, pair := range data.BluesAnticoneSizes {
|
||||
bluesAnticoneSizes[*pair.BlueHash] = pair.AnticoneSize
|
||||
}
|
||||
return externalapi.NewBlockGHOSTDAGData(
|
||||
data.BlueScore,
|
||||
data.BlueWork,
|
||||
data.SelectedParent,
|
||||
data.MergeSetBlues,
|
||||
data.MergeSetReds,
|
||||
bluesAnticoneSizes,
|
||||
)
|
||||
}
|
||||
|
||||
func domainGHOSTDAGDataGHOSTDAGData(data *externalapi.BlockGHOSTDAGData) *BlockGHOSTDAGData {
|
||||
bluesAnticoneSizes := make([]*BluesAnticoneSizes, 0, len(data.BluesAnticoneSizes()))
|
||||
for blueHash, anticoneSize := range data.BluesAnticoneSizes() {
|
||||
blueHashCopy := blueHash
|
||||
bluesAnticoneSizes = append(bluesAnticoneSizes, &BluesAnticoneSizes{
|
||||
BlueHash: &blueHashCopy,
|
||||
AnticoneSize: anticoneSize,
|
||||
})
|
||||
}
|
||||
|
||||
return &BlockGHOSTDAGData{
|
||||
BlueScore: data.BlueScore(),
|
||||
BlueWork: data.BlueWork(),
|
||||
SelectedParent: data.SelectedParent(),
|
||||
MergeSetBlues: data.MergeSetBlues(),
|
||||
MergeSetReds: data.MergeSetReds(),
|
||||
BluesAnticoneSizes: bluesAnticoneSizes,
|
||||
}
|
||||
}
|
||||
|
||||
// DomainBlockWithTrustedDataToBlockWithTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
|
||||
func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWithTrustedData) *MsgBlockWithTrustedData {
|
||||
daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow))
|
||||
for i, daaBlock := range block.DAAWindow {
|
||||
daaWindow[i] = &TrustedDataDataDAABlock{
|
||||
Block: &MsgBlock{
|
||||
Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header),
|
||||
},
|
||||
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
|
||||
for i, datum := range block.GHOSTDAGData {
|
||||
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
|
||||
Hash: datum.Hash,
|
||||
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
return &MsgBlockWithTrustedData{
|
||||
Block: DomainBlockToMsgBlock(block.Block),
|
||||
DAAScore: block.Block.Header.DAAScore(),
|
||||
DAAWindow: daaWindow,
|
||||
GHOSTDAGData: ghostdagData,
|
||||
}
|
||||
}
|
||||
|
||||
// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices
|
||||
// to *MsgBlockWithTrustedDataV4
|
||||
func DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 {
|
||||
return &MsgBlockWithTrustedDataV4{
|
||||
Block: DomainBlockToMsgBlock(block),
|
||||
DAAWindowIndices: daaWindowIndices,
|
||||
GHOSTDAGDataIndices: ghostdagDataIndices,
|
||||
}
|
||||
}
|
||||
|
||||
// DomainTrustedDataToTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
|
||||
func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData {
|
||||
daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow))
|
||||
for i, daaBlock := range domainDAAWindow {
|
||||
daaWindow[i] = &TrustedDataDAAHeader{
|
||||
Header: DomainBlockHeaderToBlockHeader(daaBlock.Header),
|
||||
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData))
|
||||
for i, datum := range domainGHOSTDAGData {
|
||||
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
|
||||
Hash: datum.Hash,
|
||||
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
|
||||
}
|
||||
}
|
||||
|
||||
return &MsgTrustedData{
|
||||
DAAWindow: daaWindow,
|
||||
GHOSTDAGData: ghostdagData,
|
||||
}
|
||||
}
|
||||
|
||||
// MsgPruningPointProofToDomainPruningPointProof converts *MsgPruningPointProof to *externalapi.PruningPointProof
|
||||
func MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage *MsgPruningPointProof) *externalapi.PruningPointProof {
|
||||
headers := make([][]externalapi.BlockHeader, len(pruningPointProofMessage.Headers))
|
||||
for blockLevel, blockLevelParents := range pruningPointProofMessage.Headers {
|
||||
headers[blockLevel] = make([]externalapi.BlockHeader, len(blockLevelParents))
|
||||
for i, header := range blockLevelParents {
|
||||
headers[blockLevel][i] = BlockHeaderToDomainBlockHeader(header)
|
||||
}
|
||||
}
|
||||
return &externalapi.PruningPointProof{
|
||||
Headers: headers,
|
||||
}
|
||||
}
|
||||
|
||||
// DomainPruningPointProofToMsgPruningPointProof converts *externalapi.PruningPointProof to *MsgPruningPointProof
|
||||
func DomainPruningPointProofToMsgPruningPointProof(pruningPointProof *externalapi.PruningPointProof) *MsgPruningPointProof {
|
||||
headers := make([][]*MsgBlockHeader, len(pruningPointProof.Headers))
|
||||
for blockLevel, blockLevelParents := range pruningPointProof.Headers {
|
||||
headers[blockLevel] = make([]*MsgBlockHeader, len(blockLevelParents))
|
||||
for i, header := range blockLevelParents {
|
||||
headers[blockLevel][i] = DomainBlockHeaderToBlockHeader(header)
|
||||
}
|
||||
}
|
||||
return &MsgPruningPointProof{
|
||||
Headers: headers,
|
||||
}
|
||||
}
|
||||
|
||||
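Taken together, DomainBlockToRPCBlock and RPCBlockToDomainBlock give a round trip between the consensus and RPC block representations. A minimal sketch, where the input block is assumed to come from the consensus layer:

```go
package example

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// roundTripBlock converts a domain block to its RPC form and back, as a client
// or test might do to check the two representations stay in sync.
func roundTripBlock(block *externalapi.DomainBlock) (*externalapi.DomainBlock, error) {
	rpcBlock := appmessage.DomainBlockToRPCBlock(block)
	return appmessage.RPCBlockToDomainBlock(rpcBlock)
}
```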
@@ -38,6 +38,10 @@ type RPCError struct {
	Message string
}

+func (err RPCError) Error() string {
+	return err.Message
+}
+
// RPCErrorf formats according to a format specifier and returns the string
// as an RPCError.
func RPCErrorf(format string, args ...interface{}) *RPCError {
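With the Error() method added in the hunk above, RPCError satisfies Go's standard error interface, so values built with RPCErrorf can flow through ordinary error handling. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// RPCErrorf returns *RPCError, which now implements error.
	var err error = appmessage.RPCErrorf("block %s not found", "deadbeef")

	var rpcErr *appmessage.RPCError
	if errors.As(err, &rpcErr) {
		fmt.Println("RPC error:", rpcErr.Message)
	}
}
```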
@@ -45,23 +45,30 @@ const (
|
||||
CmdRequestRelayBlocks
|
||||
CmdInvTransaction
|
||||
CmdRequestTransactions
|
||||
CmdIBDBlock
|
||||
CmdDoneHeaders
|
||||
CmdTransactionNotFound
|
||||
CmdReject
|
||||
CmdHeader
|
||||
CmdRequestNextHeaders
|
||||
CmdRequestPruningPointUTXOSetAndBlock
|
||||
CmdRequestPruningPointUTXOSet
|
||||
CmdPruningPointUTXOSetChunk
|
||||
CmdRequestIBDBlocks
|
||||
CmdUnexpectedPruningPoint
|
||||
CmdRequestPruningPointHash
|
||||
CmdPruningPointHash
|
||||
CmdIBDBlockLocator
|
||||
CmdIBDBlockLocatorHighestHash
|
||||
CmdIBDBlockLocatorHighestHashNotFound
|
||||
CmdBlockHeaders
|
||||
CmdRequestNextPruningPointUTXOSetChunk
|
||||
CmdDonePruningPointUTXOSetChunks
|
||||
CmdBlockWithTrustedData
|
||||
CmdDoneBlocksWithTrustedData
|
||||
CmdRequestPruningPointAndItsAnticone
|
||||
CmdIBDBlock
|
||||
CmdRequestIBDBlocks
|
||||
CmdPruningPoints
|
||||
CmdRequestPruningPointProof
|
||||
CmdPruningPointProof
|
||||
CmdReady
|
||||
CmdTrustedData
|
||||
CmdBlockWithTrustedDataV4
|
||||
|
||||
// rpc
|
||||
CmdGetCurrentNetworkRequestMessage
|
||||
@@ -116,13 +123,35 @@ const (
|
||||
CmdNotifyUTXOsChangedRequestMessage
|
||||
CmdNotifyUTXOsChangedResponseMessage
|
||||
CmdUTXOsChangedNotificationMessage
|
||||
CmdStopNotifyingUTXOsChangedRequestMessage
|
||||
CmdStopNotifyingUTXOsChangedResponseMessage
|
||||
CmdGetUTXOsByAddressesRequestMessage
|
||||
CmdGetUTXOsByAddressesResponseMessage
|
||||
CmdGetBalanceByAddressRequestMessage
|
||||
CmdGetBalanceByAddressResponseMessage
|
||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage
|
||||
CmdGetVirtualSelectedParentBlueScoreResponseMessage
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
|
||||
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
|
||||
CmdBanRequestMessage
|
||||
CmdBanResponseMessage
|
||||
CmdUnbanRequestMessage
|
||||
CmdUnbanResponseMessage
|
||||
CmdGetInfoRequestMessage
|
||||
CmdGetInfoResponseMessage
|
||||
CmdNotifyPruningPointUTXOSetOverrideRequestMessage
|
||||
CmdNotifyPruningPointUTXOSetOverrideResponseMessage
|
||||
CmdPruningPointUTXOSetOverrideNotificationMessage
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
|
||||
CmdEstimateNetworkHashesPerSecondRequestMessage
|
||||
CmdEstimateNetworkHashesPerSecondResponseMessage
|
||||
CmdNotifyVirtualDaaScoreChangedRequestMessage
|
||||
CmdNotifyVirtualDaaScoreChangedResponseMessage
|
||||
CmdVirtualDaaScoreChangedNotificationMessage
|
||||
CmdGetBalancesByAddressesRequestMessage
|
||||
CmdGetBalancesByAddressesResponseMessage
|
||||
)
|
||||
|
||||
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
||||
@@ -131,7 +160,7 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
||||
CmdVerAck: "VerAck",
|
||||
CmdRequestAddresses: "RequestAddresses",
|
||||
CmdAddresses: "Addresses",
|
||||
CmdRequestHeaders: "RequestHeaders",
|
||||
CmdRequestHeaders: "CmdRequestHeaders",
|
||||
CmdBlock: "Block",
|
||||
CmdTx: "Tx",
|
||||
CmdPing: "Ping",
|
||||
@@ -142,23 +171,30 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
||||
CmdRequestRelayBlocks: "RequestRelayBlocks",
|
||||
CmdInvTransaction: "InvTransaction",
|
||||
CmdRequestTransactions: "RequestTransactions",
|
||||
CmdIBDBlock: "IBDBlock",
|
||||
CmdDoneHeaders: "DoneHeaders",
|
||||
CmdTransactionNotFound: "TransactionNotFound",
|
||||
CmdReject: "Reject",
|
||||
CmdHeader: "Header",
|
||||
CmdRequestNextHeaders: "RequestNextHeaders",
|
||||
CmdRequestPruningPointUTXOSetAndBlock: "RequestPruningPointUTXOSetAndBlock",
|
||||
CmdRequestPruningPointUTXOSet: "RequestPruningPointUTXOSet",
|
||||
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
|
||||
CmdRequestIBDBlocks: "RequestIBDBlocks",
|
||||
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
|
||||
CmdRequestPruningPointHash: "RequestPruningPointHashHash",
|
||||
CmdPruningPointHash: "PruningPointHash",
|
||||
CmdIBDBlockLocator: "IBDBlockLocator",
|
||||
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
|
||||
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
|
||||
CmdBlockHeaders: "BlockHeaders",
|
||||
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
|
||||
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
|
||||
CmdBlockWithTrustedData: "BlockWithTrustedData",
|
||||
CmdDoneBlocksWithTrustedData: "DoneBlocksWithTrustedData",
|
||||
CmdRequestPruningPointAndItsAnticone: "RequestPruningPointAndItsAnticoneHeaders",
|
||||
CmdIBDBlock: "IBDBlock",
|
||||
CmdRequestIBDBlocks: "RequestIBDBlocks",
|
||||
CmdPruningPoints: "PruningPoints",
|
||||
CmdRequestPruningPointProof: "RequestPruningPointProof",
|
||||
CmdPruningPointProof: "PruningPointProof",
|
||||
CmdReady: "Ready",
|
||||
CmdTrustedData: "TrustedData",
|
||||
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
|
||||
}
|
||||
|
||||
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
||||
@@ -213,13 +249,35 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
||||
CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest",
|
||||
CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse",
|
||||
CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification",
|
||||
CmdStopNotifyingUTXOsChangedRequestMessage: "StopNotifyingUTXOsChangedRequest",
|
||||
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
|
||||
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
|
||||
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
|
||||
CmdGetBalanceByAddressRequestMessage: "GetBalanceByAddressRequest",
|
||||
CmdGetBalanceByAddressResponseMessage: "GetBalancesByAddressResponse",
|
||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
|
||||
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse",
|
||||
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification",
|
||||
CmdBanRequestMessage: "BanRequest",
|
||||
CmdBanResponseMessage: "BanResponse",
|
||||
CmdUnbanRequestMessage: "UnbanRequest",
|
||||
CmdUnbanResponseMessage: "UnbanResponse",
|
||||
CmdGetInfoRequestMessage: "GetInfoRequest",
|
||||
CmdGetInfoResponseMessage: "GeInfoResponse",
|
||||
CmdNotifyPruningPointUTXOSetOverrideRequestMessage: "NotifyPruningPointUTXOSetOverrideRequest",
|
||||
CmdNotifyPruningPointUTXOSetOverrideResponseMessage: "NotifyPruningPointUTXOSetOverrideResponse",
|
||||
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
|
||||
CmdEstimateNetworkHashesPerSecondRequestMessage: "EstimateNetworkHashesPerSecondRequest",
|
||||
CmdEstimateNetworkHashesPerSecondResponseMessage: "EstimateNetworkHashesPerSecondResponse",
|
||||
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
|
||||
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
|
||||
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
|
||||
CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest",
|
||||
CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
|
||||
}
|
||||
|
||||
// Message is an interface that describes a kaspa message. A type that
|
||||
|
||||
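The two maps above are the lookup tables used when logging message traffic. A minimal sketch of resolving a command to its display name; the chosen command is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	cmd := appmessage.CmdBlockWithTrustedDataV4
	name := appmessage.ProtocolMessageCommandToString[cmd]
	fmt.Printf("p2p command %d maps to %q\n", cmd, name)
}
```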
@@ -15,19 +15,6 @@ import (
|
||||
// backing array multiple times.
|
||||
const defaultTransactionAlloc = 2048
|
||||
|
||||
// MaxMassAcceptedByBlock is the maximum total transaction mass a block may accept.
|
||||
const MaxMassAcceptedByBlock = 10000000
|
||||
|
||||
// MaxMassPerTx is the maximum total mass a transaction may have.
|
||||
const MaxMassPerTx = MaxMassAcceptedByBlock / 2
|
||||
|
||||
// MaxTxPerBlock is the maximum number of transactions that could
|
||||
// possibly fit into a block.
|
||||
const MaxTxPerBlock = (MaxMassAcceptedByBlock / minTxPayload) + 1
|
||||
|
||||
// MaxBlockParents is the maximum allowed number of parents for block.
|
||||
const MaxBlockParents = 10
|
||||
|
||||
// TxLoc holds locator data for the offset and length of where a transaction is
|
||||
// located within a MsgBlock data buffer.
|
||||
type TxLoc struct {
|
||||
|
||||
@@ -18,16 +18,21 @@ import (
|
||||
|
||||
// TestBlock tests the MsgBlock API.
|
||||
func TestBlock(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
pver := uint32(4)
|
||||
|
||||
// Block 1 header.
|
||||
parentHashes := blockOne.Header.ParentHashes
|
||||
parents := blockOne.Header.Parents
|
||||
hashMerkleRoot := blockOne.Header.HashMerkleRoot
|
||||
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
|
||||
utxoCommitment := blockOne.Header.UTXOCommitment
|
||||
bits := blockOne.Header.Bits
|
||||
nonce := blockOne.Header.Nonce
|
||||
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
|
||||
daaScore := blockOne.Header.DAAScore
|
||||
blueScore := blockOne.Header.BlueScore
|
||||
blueWork := blockOne.Header.BlueWork
|
||||
pruningPoint := blockOne.Header.PruningPoint
|
||||
bh := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce,
|
||||
daaScore, blueScore, blueWork, pruningPoint)
|
||||
|
||||
// Ensure the command is expected value.
|
||||
wantCmd := MessageCommand(5)
|
||||
@@ -131,7 +136,7 @@ func TestConvertToPartial(t *testing.T) {
|
||||
var blockOne = MsgBlock{
|
||||
Header: MsgBlockHeader{
|
||||
Version: 0,
|
||||
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
|
||||
Parents: []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}},
|
||||
HashMerkleRoot: mainnetGenesisMerkleRoot,
|
||||
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
|
||||
UTXOCommitment: exampleUTXOCommitment,
|
||||
|
||||
@@ -5,13 +5,12 @@
package appmessage

import (
"math"
"math/big"

"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
)

// BaseBlockHeaderPayload is the base number of bytes a block header can be,

@@ -39,8 +38,8 @@ type MsgBlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version uint16

// Hashes of the parent block headers in the blockDAG.
ParentHashes []*externalapi.DomainHash
// Parents are the parent block hashes of the block in the DAG per superblock level.
Parents []externalapi.BlockLevelParents

// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *externalapi.DomainHash

@@ -60,15 +59,16 @@ type MsgBlockHeader struct {

// Nonce used to generate the block.
Nonce uint64
}

// NumParentBlocks return the number of entries in ParentHashes
func (h *MsgBlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
// DAAScore is the DAA score of the block.
DAAScore uint64

BlueScore uint64

// BlueWork is the blue work of the block.
BlueWork *big.Int

PruningPoint *externalapi.DomainHash
}

// BlockHash computes the block identifier hash for the given block header.

@@ -76,33 +76,27 @@ func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
}

// IsGenesis returns true iff this block is a genesis block
func (h *MsgBlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (h *MsgBlockHeader) Command() MessageCommand {
return CmdHeader
}

// NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
// block with defaults or calculated values for the remaining fields.
func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
func NewBlockHeader(version uint16, parents []externalapi.BlockLevelParents, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce,
daaScore, blueScore uint64, blueWork *big.Int, pruningPoint *externalapi.DomainHash) *MsgBlockHeader {

// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgBlockHeader{
Version: version,
ParentHashes: parentHashes,
Parents: parents,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
DAAScore: daaScore,
BlueScore: blueScore,
BlueWork: blueWork,
PruningPoint: pruningPoint,
}
}
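The NewBlockHeader constructor now takes per-level parents plus the DAA score, blue score, blue work, and pruning point. The following is a minimal sketch of a call against the new signature; the hash value, bits, and scores are placeholders (the numbers mirror the updated test), and the appmessage import path is assumed from the repository layout.

package main

import (
	"math/big"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Placeholder parent hash; a real caller would use the hash of an existing block.
	parentHash, err := externalapi.NewDomainHashFromString(
		"000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0")
	if err != nil {
		panic(err)
	}
	parents := []externalapi.BlockLevelParents{{parentHash}}

	header := appmessage.NewBlockHeader(
		1,               // version
		parents,         // parent hashes per block level
		parentHash,      // hashMerkleRoot (placeholder)
		parentHash,      // acceptedIDMerkleRoot (placeholder)
		parentHash,      // utxoCommitment (placeholder)
		0x1d00ffff,      // bits
		0,               // nonce
		123,             // daaScore
		456,             // blueScore
		big.NewInt(789), // blueWork
		parentHash,      // pruningPoint (placeholder)
	)
	_ = header
}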
@@ -5,33 +5,34 @@
package appmessage

import (
"math/big"
"reflect"
"testing"

"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
)

// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
nonce := uint64(0xba4d87a69924a93d)

hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
parents := []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}}

merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
daaScore := uint64(123)
blueScore := uint64(456)
blueWork := big.NewInt(789)
pruningPoint := simnetGenesisHash
bh := NewBlockHeader(1, parents, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce,
daaScore, blueScore, blueWork, pruningPoint)

// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
if !reflect.DeepEqual(bh.Parents, parents) {
t.Errorf("NewBlockHeader: wrong parents - got %v, want %v",
spew.Sprint(bh.Parents), spew.Sprint(parents))
}
if bh.HashMerkleRoot != merkleHash {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",

@@ -45,44 +46,20 @@ func TestBlockHeader(t *testing.T) {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
}

func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)

baseBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
if bh.DAAScore != daaScore {
t.Errorf("NewBlockHeader: wrong daaScore - got %v, want %v",
bh.DAAScore, daaScore)
}
genesisBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
if bh.BlueScore != blueScore {
t.Errorf("NewBlockHeader: wrong blueScore - got %v, want %v",
bh.BlueScore, blueScore)
}

tests := []struct {
in *MsgBlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
if bh.BlueWork != blueWork {
t.Errorf("NewBlockHeader: wrong blueWork - got %v, want %v",
bh.BlueWork, blueWork)
}

t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
if !bh.PruningPoint.Equal(pruningPoint) {
t.Errorf("NewBlockHeader: wrong pruningPoint - got %v, want %v",
bh.PruningPoint, pruningPoint)
}
}
app/appmessage/p2p_msgblockwithtrusteddata.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package appmessage

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"math/big"
)

// MsgBlockWithTrustedData represents a kaspa BlockWithTrustedData message
type MsgBlockWithTrustedData struct {
baseMessage

Block *MsgBlock
DAAScore uint64
DAAWindow []*TrustedDataDataDAABlock
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
}

// Command returns the protocol command string for the message
func (msg *MsgBlockWithTrustedData) Command() MessageCommand {
return CmdBlockWithTrustedData
}

// NewMsgBlockWithTrustedData returns a new MsgBlockWithTrustedData.
func NewMsgBlockWithTrustedData() *MsgBlockWithTrustedData {
return &MsgBlockWithTrustedData{}
}

// TrustedDataDataDAABlock is an appmessage representation of externalapi.TrustedDataDataDAABlock
type TrustedDataDataDAABlock struct {
Block *MsgBlock
GHOSTDAGData *BlockGHOSTDAGData
}

// BlockGHOSTDAGData is an appmessage representation of externalapi.BlockGHOSTDAGData
type BlockGHOSTDAGData struct {
BlueScore uint64
BlueWork *big.Int
SelectedParent *externalapi.DomainHash
MergeSetBlues []*externalapi.DomainHash
MergeSetReds []*externalapi.DomainHash
BluesAnticoneSizes []*BluesAnticoneSizes
}

// BluesAnticoneSizes is an appmessage representation of the BluesAnticoneSizes part of GHOSTDAG data.
type BluesAnticoneSizes struct {
BlueHash *externalapi.DomainHash
AnticoneSize externalapi.KType
}

// BlockGHOSTDAGDataHashPair is an appmessage representation of externalapi.BlockGHOSTDAGDataHashPair
type BlockGHOSTDAGDataHashPair struct {
Hash *externalapi.DomainHash
GHOSTDAGData *BlockGHOSTDAGData
}
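The new BlockWithTrustedData message bundles a block with the GHOSTDAG data a syncing node cannot compute on its own. Below is a hedged sketch of how these appmessage structs might be populated; the hash and the numeric values are placeholders, not data from the change itself, and the import paths are assumed from the repository layout.

package main

import (
	"math/big"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Placeholder hash standing in for a real selected parent.
	selectedParent, err := externalapi.NewDomainHashFromString(
		"000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0")
	if err != nil {
		panic(err)
	}

	ghostdagData := &appmessage.BlockGHOSTDAGData{
		BlueScore:      1000,             // placeholder
		BlueWork:       big.NewInt(5000), // placeholder
		SelectedParent: selectedParent,
		MergeSetBlues:  []*externalapi.DomainHash{selectedParent},
	}

	msg := appmessage.NewMsgBlockWithTrustedData()
	msg.DAAScore = 1000 // placeholder
	msg.GHOSTDAGData = []*appmessage.BlockGHOSTDAGDataHashPair{
		{Hash: selectedParent, GHOSTDAGData: ghostdagData},
	}
	_ = msg // Block itself is left nil in this sketch.
}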
app/appmessage/p2p_msgblockwithtrusteddatav4.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package appmessage

// MsgBlockWithTrustedDataV4 represents a kaspa BlockWithTrustedDataV4 message
type MsgBlockWithTrustedDataV4 struct {
baseMessage

Block *MsgBlock
DAAWindowIndices []uint64
GHOSTDAGDataIndices []uint64
}

// Command returns the protocol command string for the message
func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand {
return CmdBlockWithTrustedDataV4
}

// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4.
func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
return &MsgBlockWithTrustedDataV4{}
}

app/appmessage/p2p_msgdoneblockswithmetadata.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package appmessage

// MsgDoneBlocksWithTrustedData implements the Message interface and represents a kaspa
// DoneBlocksWithTrustedData message
//
// This message has no payload.
type MsgDoneBlocksWithTrustedData struct {
baseMessage
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneBlocksWithTrustedData) Command() MessageCommand {
return CmdDoneBlocksWithTrustedData
}

// NewMsgDoneBlocksWithTrustedData returns a new kaspa DoneBlocksWithTrustedData message that conforms to the
// Message interface.
func NewMsgDoneBlocksWithTrustedData() *MsgDoneBlocksWithTrustedData {
return &MsgDoneBlocksWithTrustedData{}
}

@@ -0,0 +1,16 @@
package appmessage

// MsgRequestPruningPointAndItsAnticone represents a kaspa RequestPruningPointAndItsAnticone message
type MsgRequestPruningPointAndItsAnticone struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointAndItsAnticone) Command() MessageCommand {
return CmdRequestPruningPointAndItsAnticone
}

// NewMsgRequestPruningPointAndItsAnticone returns a new MsgRequestPruningPointAndItsAnticone.
func NewMsgRequestPruningPointAndItsAnticone() *MsgRequestPruningPointAndItsAnticone {
return &MsgRequestPruningPointAndItsAnticone{}
}
@@ -1,65 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package appmessage

import (
"reflect"
"testing"

"github.com/davecgh/go-spew/spew"
)

// TestIBDBlock tests the MsgIBDBlock API.
func TestIBDBlock(t *testing.T) {
pver := ProtocolVersion

// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)

// Ensure the command is expected value.
wantCmd := MessageCommand(15)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
cmd, wantCmd)
}

// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}

// Ensure we get the same block header data back out.
if !reflect.DeepEqual(&msg.Header, bh) {
t.Errorf("NewMsgIBDBlock: wrong block header - got %v, want %v",
spew.Sdump(&msg.Header), spew.Sdump(bh))
}

// Ensure transactions are added properly.
tx := blockOne.Transactions[0].Copy()
msg.AddTransaction(tx)
if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
spew.Sdump(msg.Transactions),
spew.Sdump(blockOne.Transactions))
}

// Ensure transactions are properly cleared.
msg.ClearTransactions()
if len(msg.Transactions) != 0 {
t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
len(msg.Transactions), 0)
}
}
app/appmessage/p2p_msgibdblocklocatorhighesthashnotfound.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package appmessage

// MsgIBDBlockLocatorHighestHashNotFound represents a kaspa BlockLocatorHighestHashNotFound message
type MsgIBDBlockLocatorHighestHashNotFound struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocatorHighestHashNotFound) Command() MessageCommand {
return CmdIBDBlockLocatorHighestHashNotFound
}

// NewMsgIBDBlockLocatorHighestHashNotFound returns a new IBDBlockLocatorHighestHashNotFound message
func NewMsgIBDBlockLocatorHighestHashNotFound() *MsgIBDBlockLocatorHighestHashNotFound {
return &MsgIBDBlockLocatorHighestHashNotFound{}
}

@@ -6,17 +6,12 @@ package appmessage

import (
"testing"

"github.com/kaspanet/kaspad/util/random"
)

// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
// Ensure we get the same nonce back out.
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
nonce := uint64(0x61c2c5535902862)
msg := NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",

@@ -6,16 +6,11 @@ package appmessage

import (
"testing"

"github.com/kaspanet/kaspad/util/random"
)

// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: error generating nonce: %v", err)
}
nonce := uint64(0x1a05b581a5182c)
msg := NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",
@@ -1,23 +0,0 @@
package appmessage

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgPruningPointHashMessage represents a kaspa PruningPointHash message
type MsgPruningPointHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}

// Command returns the protocol command string for the message
func (msg *MsgPruningPointHashMessage) Command() MessageCommand {
return CmdPruningPointHash
}

// NewPruningPointHashMessage returns a new kaspa PruningPointHash message
func NewPruningPointHashMessage(hash *externalapi.DomainHash) *MsgPruningPointHashMessage {
return &MsgPruningPointHashMessage{
Hash: hash,
}
}

app/appmessage/p2p_msgpruningpointproof.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package appmessage

// MsgPruningPointProof represents a kaspa PruningPointProof message
type MsgPruningPointProof struct {
baseMessage

Headers [][]*MsgBlockHeader
}

// Command returns the protocol command string for the message
func (msg *MsgPruningPointProof) Command() MessageCommand {
return CmdPruningPointProof
}

// NewMsgPruningPointProof returns a new MsgPruningPointProof.
func NewMsgPruningPointProof(headers [][]*MsgBlockHeader) *MsgPruningPointProof {
return &MsgPruningPointProof{
Headers: headers,
}
}
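MsgPruningPointProof carries a nested slice of headers ([][]*MsgBlockHeader). In the sketch below that nesting is read as one header list per proof level; that reading is an assumption, not something stated by the diff, and the zero-value headers are placeholders only.

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Each inner slice is treated here as the proof headers for one level.
	// Zero-value headers are used purely as placeholders.
	level0 := []*appmessage.MsgBlockHeader{{}}
	level1 := []*appmessage.MsgBlockHeader{{}}

	proof := appmessage.NewMsgPruningPointProof([][]*appmessage.MsgBlockHeader{level0, level1})
	_ = proof.Command()
}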
app/appmessage/p2p_msgpruningpoints.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package appmessage

// MsgPruningPoints represents a kaspa PruningPoints message
type MsgPruningPoints struct {
baseMessage

Headers []*MsgBlockHeader
}

// Command returns the protocol command string for the message
func (msg *MsgPruningPoints) Command() MessageCommand {
return CmdPruningPoints
}

// NewMsgPruningPoints returns a new MsgPruningPoints.
func NewMsgPruningPoints(headers []*MsgBlockHeader) *MsgPruningPoints {
return &MsgPruningPoints{
Headers: headers,
}
}

@@ -31,6 +31,6 @@ type OutpointAndUTXOEntryPair struct {
type UTXOEntry struct {
Amount uint64
ScriptPublicKey *externalapi.ScriptPublicKey
BlockBlueScore uint64
BlockDAAScore uint64
IsCoinbase bool
}
@@ -10,7 +10,6 @@ import (
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
Limit uint32
}

@@ -24,9 +23,8 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
func NewMsgRequestBlockLocator(highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
LowHash: lowHash,
HighHash: highHash,
Limit: limit,
}
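After this change the locator request is keyed only by the high hash plus a limit; the low hash no longer appears in the constructor. A sketch of the updated call site, using the hash string that also appears in the tests below purely for illustration:

package main

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Illustrative hash; any valid 64-character hex hash works here.
	highHash, err := externalapi.NewDomainHashFromString(
		"000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0")
	if err != nil {
		panic(err)
	}

	// A limit of 0 mirrors the updated test.
	msg := appmessage.NewMsgRequestBlockLocator(highHash, 0)
	_ = msg.Command()
}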
@@ -16,7 +16,7 @@ func TestRequestBlockLocator(t *testing.T) {

// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
msg := NewMsgRequestBlockLocator(highHash, 0)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)

@@ -10,7 +10,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// TestRequstIBDBlocks tests the MsgRequestHeaders API.
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := externalapi.NewDomainHashFromString(hashStr)

@@ -27,14 +27,14 @@ func TestRequstIBDBlocks(t *testing.T) {
// Ensure we get the same data back out.
msg := NewMsgRequstHeaders(lowHash, highHash)
if !msg.HighHash.Equal(highHash) {
t.Errorf("NewMsgRequstHeaders: wrong high hash - got %v, want %v",
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}

// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstHeaders: wrong command - got %v want %v",
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
cmd, wantCmd)
}
}

app/appmessage/p2p_msgrequestpruningpointproof.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package appmessage

// MsgRequestPruningPointProof represents a kaspa RequestPruningPointProof message
type MsgRequestPruningPointProof struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointProof) Command() MessageCommand {
return CmdRequestPruningPointProof
}

// NewMsgRequestPruningPointProof returns a new MsgRequestPruningPointProof.
func NewMsgRequestPruningPointProof() *MsgRequestPruningPointProof {
return &MsgRequestPruningPointProof{}
}

@@ -4,20 +4,20 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgRequestPruningPointUTXOSetAndBlock represents a kaspa RequestPruningPointUTXOSetAndBlock message
type MsgRequestPruningPointUTXOSetAndBlock struct {
// MsgRequestPruningPointUTXOSet represents a kaspa RequestPruningPointUTXOSet message
type MsgRequestPruningPointUTXOSet struct {
baseMessage
PruningPointHash *externalapi.DomainHash
}

// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestPruningPointUTXOSetAndBlock
func (msg *MsgRequestPruningPointUTXOSet) Command() MessageCommand {
return CmdRequestPruningPointUTXOSet
}

// NewMsgRequestPruningPointUTXOSetAndBlock returns a new MsgRequestPruningPointUTXOSetAndBlock
func NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSetAndBlock {
return &MsgRequestPruningPointUTXOSetAndBlock{
// NewMsgRequestPruningPointUTXOSet returns a new MsgRequestPruningPointUTXOSet
func NewMsgRequestPruningPointUTXOSet(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSet {
return &MsgRequestPruningPointUTXOSet{
PruningPointHash: pruningPointHash,
}
}
app/appmessage/p2p_msgtrusteddata.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package appmessage

// MsgTrustedData represents a kaspa TrustedData message
type MsgTrustedData struct {
baseMessage

DAAWindow []*TrustedDataDAAHeader
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
}

// Command returns the protocol command string for the message
func (msg *MsgTrustedData) Command() MessageCommand {
return CmdTrustedData
}

// NewMsgTrustedData returns a new MsgTrustedData.
func NewMsgTrustedData() *MsgTrustedData {
return &MsgTrustedData{}
}

// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader
type TrustedDataDAAHeader struct {
Header *MsgBlockHeader
GHOSTDAGData *BlockGHOSTDAGData
}

@@ -6,7 +6,6 @@ package appmessage

import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"strconv"

"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

@@ -91,16 +90,18 @@ type TxIn struct {
PreviousOutpoint Outpoint
SignatureScript []byte
Sequence uint64
SigOpCount byte
}

// NewTxIn returns a new kaspa transaction input with the provided
// previous outpoint point and signature script with a default sequence of
// MaxTxInSequenceNum.
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64) *TxIn {
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64, sigOpCount byte) *TxIn {
return &TxIn{
PreviousOutpoint: *prevOut,
SignatureScript: signatureScript,
Sequence: sequence,
SigOpCount: sigOpCount,
}
}

@@ -133,7 +134,6 @@ type MsgTx struct {
LockTime uint64
SubnetworkID externalapi.DomainSubnetworkID
Gas uint64
PayloadHash externalapi.DomainHash
Payload []byte
}

@@ -179,7 +179,6 @@ func (msg *MsgTx) Copy() *MsgTx {
LockTime: msg.LockTime,
SubnetworkID: msg.SubnetworkID,
Gas: msg.Gas,
PayloadHash: msg.PayloadHash,
}

if msg.Payload != nil {

@@ -209,6 +208,7 @@ func (msg *MsgTx) Copy() *MsgTx {
PreviousOutpoint: newOutpoint,
SignatureScript: newScript,
Sequence: oldTxIn.Sequence,
SigOpCount: oldTxIn.SigOpCount,
}

// Finally, append this fully copied txin.

@@ -280,18 +280,12 @@ func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *extern
txOut = make([]*TxOut, 0, defaultTxInOutAlloc)
}

var payloadHash externalapi.DomainHash
if *subnetworkID != subnetworks.SubnetworkIDNative {
payloadHash = *hashes.PayloadHash(payload)
}

return &MsgTx{
Version: version,
TxIn: txIn,
TxOut: txOut,
SubnetworkID: *subnetworkID,
Gas: gas,
PayloadHash: payloadHash,
Payload: payload,
LockTime: lockTime,
}
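TxIn now carries a SigOpCount, so NewTxIn takes a fourth argument. A rough sketch of the updated call follows; the zero-value Outpoint is a placeholder, the signature-script bytes are copied from the test below, and the constants import path is assumed from that test's usage.

package main

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)

func main() {
	// Zero-value placeholder outpoint; a real input references an existing transaction output.
	prevOut := &appmessage.Outpoint{}
	sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}

	// The trailing 1 is the new sigOpCount argument.
	txIn := appmessage.NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum, 1)
	_ = txIn
}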
@@ -22,7 +22,7 @@ import (

// TestTx tests the MsgTx API.
func TestTx(t *testing.T) {
pver := ProtocolVersion
pver := uint32(4)

txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := transactionid.FromString(txIDStr)

@@ -68,7 +68,7 @@ func TestTx(t *testing.T) {

// Ensure we get the same transaction input back out.
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum, 1)
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
spew.Sprint(&txIn.PreviousOutpoint),

@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {

// TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
txHash1Str := "4bee9ee495bd93a755de428376bd582a2bb6ec37c041753b711c0606d5745c13"
txID1Str := "f868bd20e816256b80eac976821be4589d24d21141bd1cec6e8005d0c16c6881"
txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
wantTxID1, err := transactionid.FromString(txID1Str)
if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err)

@@ -185,14 +185,14 @@ func TestTxHashAndID(t *testing.T) {
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
}

hash2Str := "cb1bdb4a83d4885535fb3cceb5c96597b7df903db83f0ffcd779d703affd8efd"
hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
}

id2Str := "ca080073d4ddf5b84443a0964af633f3c70a5b290fd3bc35a7e6f93fd33f9330"
id2Str := "89ffb49474637502d9059af38b8a95fc2f0d3baef5c801d7a9b9c8830671b711"
wantID2, err := transactionid.FromString(id2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)

@@ -82,12 +82,12 @@ func (msg *MsgVersion) Command() MessageCommand {
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion {

// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgVersion{
ProtocolVersion: ProtocolVersion,
ProtocolVersion: protocolVersion,
Network: network,
Services: 0,
Timestamp: mstime.Now(),
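NewMsgVersion no longer hard-codes the package-level ProtocolVersion (which is also removed later in this change); the caller passes the version it actually negotiated. The sketch below mirrors the updated test's use of protocol version 4; the id package import path is assumed from the repository layout.

package main

import (
	"net"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
)

func main() {
	me := appmessage.NewNetAddress(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111})

	peerID, err := id.GenerateID()
	if err != nil {
		panic(err)
	}

	// The trailing 4 is the explicit protocol version, matching the updated tests.
	msg := appmessage.NewMsgVersion(me, peerID, "mainnet", nil, 4)
	_ = msg
}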
@@ -15,18 +15,18 @@ import (

// TestVersion tests the MsgVersion API.
func TestVersion(t *testing.T) {
pver := ProtocolVersion
pver := uint32(4)

// Create version message data.
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
me := NewNetAddress(tcpAddrMe)
generatedID, err := id.GenerateID()
if err != nil {
t.Fatalf("id.GenerateID: %s", err)
}

// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)

@@ -5,8 +5,9 @@
package appmessage

import (
"github.com/kaspanet/kaspad/util/mstime"
"net"

"github.com/kaspanet/kaspad/util/mstime"
)

// NetAddress defines information about a peer on the network including the time

@@ -15,9 +16,6 @@ type NetAddress struct {
// Last time the address was seen.
Timestamp mstime.Time

// Bitfield which identifies the services supported by the address.
Services ServiceFlag

// IP address of the peer.
IP net.IP

@@ -26,17 +24,6 @@ type NetAddress struct {
Port uint16
}

// HasService returns whether the specified service is supported by the address.
func (na *NetAddress) HasService(service ServiceFlag) bool {
return na.Services&service == service
}

// AddService adds service as a supported service by the peer generating the
// message.
func (na *NetAddress) AddService(service ServiceFlag) {
na.Services |= service
}

// TCPAddress converts the NetAddress to *net.TCPAddr
func (na *NetAddress) TCPAddress() *net.TCPAddr {
return &net.TCPAddr{

@@ -47,20 +34,19 @@ func (na *NetAddress) TCPAddress() *net.TCPAddr {

// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and
// supported services with defaults for the remaining fields.
func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), services, ip, port)
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), ip, port)
}

// NewNetAddressTimestamp returns a new NetAddress using the provided
// timestamp, IP, port, and supported services. The timestamp is rounded to
// single millisecond precision.
func NewNetAddressTimestamp(
timestamp mstime.Time, services ServiceFlag, ip net.IP, port uint16) *NetAddress {
timestamp mstime.Time, ip net.IP, port uint16) *NetAddress {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
na := NetAddress{
Timestamp: timestamp,
Services: services,
IP: ip,
Port: port,
}

@@ -69,6 +55,10 @@ func NewNetAddressTimestamp(

// NewNetAddress returns a new NetAddress using the provided TCP address and
// supported services with defaults for the remaining fields.
func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port), services)
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
}

func (na NetAddress) String() string {
return na.TCPAddress().String()
}
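With the ServiceFlag bitfield removed, a NetAddress is now just a timestamped IP and port. A minimal sketch of the two remaining constructors, using a loopback address as a placeholder:

package main

import (
	"fmt"
	"net"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// From an IP and port directly.
	na1 := appmessage.NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 16111)

	// From an existing *net.TCPAddr.
	na2 := appmessage.NewNetAddress(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111})

	fmt.Println(na1.String(), na2.String())
}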
@@ -15,7 +15,7 @@ func TestNetAddress(t *testing.T) {
port := 16111

// Test NewNetAddress.
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}, 0)
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port})

// Ensure we get the same ip, port, and services back out.
if !na.IP.Equal(ip) {

@@ -25,21 +25,4 @@ func TestNetAddress(t *testing.T) {
t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port,
port)
}
if na.Services != 0 {
t.Errorf("NetNetAddress: wrong services - got %v, want %v",
na.Services, 0)
}
if na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}

// Ensure adding the full service node flag works.
na.AddService(SFNodeNetwork)
if na.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
na.Services, SFNodeNetwork)
}
if !na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

app/appmessage/p2p_ready.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package appmessage

// MsgReady implements the Message interface and represents a kaspa
// Ready message. It is used to notify that the peer is ready to receive
// messages.
//
// This message has no payload.
type MsgReady struct {
baseMessage
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgReady) Command() MessageCommand {
return CmdReady
}

// NewMsgReady returns a new kaspa Ready message that conforms to the
// Message interface.
func NewMsgReady() *MsgReady {
return &MsgReady{}
}

@@ -1,16 +0,0 @@
package appmessage

// MsgRequestPruningPointHashMessage represents a kaspa RequestPruningPointHashMessage message
type MsgRequestPruningPointHashMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointHashMessage) Command() MessageCommand {
return CmdRequestPruningPointHash
}

// NewMsgRequestPruningPointHashMessage returns a new kaspa RequestPruningPointHash message
func NewMsgRequestPruningPointHashMessage() *MsgRequestPruningPointHashMessage {
return &MsgRequestPruningPointHashMessage{}
}

@@ -11,9 +11,6 @@ import (
)

const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1

// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
app/appmessage/rpc_ban.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package appmessage

// BanRequestMessage is an appmessage corresponding to
// its respective RPC message
type BanRequestMessage struct {
baseMessage

IP string
}

// Command returns the protocol command string for the message
func (msg *BanRequestMessage) Command() MessageCommand {
return CmdBanRequestMessage
}

// NewBanRequestMessage returns an instance of the message
func NewBanRequestMessage(ip string) *BanRequestMessage {
return &BanRequestMessage{
IP: ip,
}
}

// BanResponseMessage is an appmessage corresponding to
// its respective RPC message
type BanResponseMessage struct {
baseMessage

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *BanResponseMessage) Command() MessageCommand {
return CmdBanResponseMessage
}

// NewBanResponseMessage returns a instance of the message
func NewBanResponseMessage() *BanResponseMessage {
return &BanResponseMessage{}
}
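The new Ban RPC is a simple request/response pair keyed by IP. A sketch of building the request appmessage on the client side; the IP is a placeholder.

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Placeholder IP; a real caller passes the peer's address.
	request := appmessage.NewBanRequestMessage("192.0.2.1")
	_ = request.Command() // CmdBanRequestMessage
}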
app/appmessage/rpc_estimate_network_hashes_per_second.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package appmessage

// EstimateNetworkHashesPerSecondRequestMessage is an appmessage corresponding to
// its respective RPC message
type EstimateNetworkHashesPerSecondRequestMessage struct {
baseMessage
StartHash string
WindowSize uint32
}

// Command returns the protocol command string for the message
func (msg *EstimateNetworkHashesPerSecondRequestMessage) Command() MessageCommand {
return CmdEstimateNetworkHashesPerSecondRequestMessage
}

// NewEstimateNetworkHashesPerSecondRequestMessage returns a instance of the message
func NewEstimateNetworkHashesPerSecondRequestMessage(startHash string, windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage {
return &EstimateNetworkHashesPerSecondRequestMessage{
StartHash: startHash,
WindowSize: windowSize,
}
}

// EstimateNetworkHashesPerSecondResponseMessage is an appmessage corresponding to
// its respective RPC message
type EstimateNetworkHashesPerSecondResponseMessage struct {
baseMessage
NetworkHashesPerSecond uint64

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *EstimateNetworkHashesPerSecondResponseMessage) Command() MessageCommand {
return CmdEstimateNetworkHashesPerSecondResponseMessage
}

// NewEstimateNetworkHashesPerSecondResponseMessage returns a instance of the message
func NewEstimateNetworkHashesPerSecondResponseMessage(networkHashesPerSecond uint64) *EstimateNetworkHashesPerSecondResponseMessage {
return &EstimateNetworkHashesPerSecondResponseMessage{
NetworkHashesPerSecond: networkHashesPerSecond,
}
}

app/appmessage/rpc_get_balance_by_address.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package appmessage

// GetBalanceByAddressRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressRequestMessage struct {
baseMessage
Address string
}

// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand {
return CmdGetBalanceByAddressRequestMessage
}

// NewGetBalanceByAddressRequest returns a instance of the message
func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage {
return &GetBalanceByAddressRequestMessage{
Address: address,
}
}

// GetBalanceByAddressResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressResponseMessage struct {
baseMessage
Balance uint64

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand {
return CmdGetBalanceByAddressResponseMessage
}

// NewGetBalanceByAddressResponse returns an instance of the message
func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage {
return &GetBalanceByAddressResponseMessage{
Balance: Balance,
}
}
app/appmessage/rpc_get_balances_by_addresses.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package appmessage

// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesRequestMessage struct {
baseMessage
Addresses []string
}

// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesRequestMessage
}

// NewGetBalancesByAddressesRequest returns a instance of the message
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
return &GetBalancesByAddressesRequestMessage{
Addresses: addresses,
}
}

// BalancesByAddressesEntry represents the balance of some address
type BalancesByAddressesEntry struct {
Address string
Balance uint64
}

// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesResponseMessage struct {
baseMessage
Entries []*BalancesByAddressesEntry

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesResponseMessage
}

// NewGetBalancesByAddressesResponse returns an instance of the message
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
return &GetBalancesByAddressesResponseMessage{
Entries: entries,
}
}
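The batched balance RPC takes a list of address strings and answers with one BalancesByAddressesEntry per address. A sketch of both sides of that exchange as plain appmessage values; the addresses and balances are placeholders.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Placeholder addresses; real ones are bech32-encoded kaspa addresses.
	request := appmessage.NewGetBalancesByAddressesRequest([]string{"kaspa:addr1", "kaspa:addr2"})

	// What a server-side handler might hand back.
	response := appmessage.NewGetBalancesByAddressesResponse([]*appmessage.BalancesByAddressesEntry{
		{Address: "kaspa:addr1", Balance: 100_000_000},
		{Address: "kaspa:addr2", Balance: 0},
	})

	fmt.Println(request.Command(), response.Command())
}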
@@ -4,8 +4,8 @@ package appmessage
// its respective RPC message
type GetBlockRequestMessage struct {
baseMessage
Hash string
IncludeTransactionVerboseData bool
Hash string
IncludeTransactions bool
}

// Command returns the protocol command string for the message

@@ -14,10 +14,10 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
}

// NewGetBlockRequestMessage returns a instance of the message
func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
func NewGetBlockRequestMessage(hash string, includeTransactions bool) *GetBlockRequestMessage {
return &GetBlockRequestMessage{
Hash: hash,
IncludeTransactionVerboseData: includeTransactionVerboseData,
Hash: hash,
IncludeTransactions: includeTransactions,
}
}

@@ -25,7 +25,7 @@ func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool)
// its respective RPC message
type GetBlockResponseMessage struct {
baseMessage
BlockVerboseData *BlockVerboseData
Block *RPCBlock

Error *RPCError
}

@@ -39,69 +39,3 @@ func (msg *GetBlockResponseMessage) Command() MessageCommand {
func NewGetBlockResponseMessage() *GetBlockResponseMessage {
return &GetBlockResponseMessage{}
}

// BlockVerboseData holds verbose data about a block
type BlockVerboseData struct {
Hash string
Version uint16
VersionHex string
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
TxIDs []string
TransactionVerboseData []*TransactionVerboseData
Time int64
Nonce uint64
Bits string
Difficulty float64
ParentHashes []string
SelectedParentHash string
BlueScore uint64
IsHeaderOnly bool
}

// TransactionVerboseData holds verbose data about a transaction
type TransactionVerboseData struct {
TxID string
Hash string
Size uint64
Version uint16
LockTime uint64
SubnetworkID string
Gas uint64
PayloadHash string
Payload string
TransactionVerboseInputs []*TransactionVerboseInput
TransactionVerboseOutputs []*TransactionVerboseOutput
BlockHash string
Time uint64
BlockTime uint64
}

// TransactionVerboseInput holds data about a transaction input
type TransactionVerboseInput struct {
TxID string
OutputIndex uint32
ScriptSig *ScriptSig
Sequence uint64
}

// ScriptSig holds data about a script signature
type ScriptSig struct {
Asm string
Hex string
}

// TransactionVerboseOutput holds data about a transaction output
type TransactionVerboseOutput struct {
Value uint64
Index uint32
ScriptPubKey *ScriptPubKeyResult
}

// ScriptPubKeyResult holds data about a script public key
type ScriptPubKeyResult struct {
Hex string
Type string
Address string
}
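GetBlock now answers with an RPCBlock instead of the removed BlockVerboseData, and the request flag is renamed to match. A sketch of the updated request; the block hash string is a placeholder.

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Placeholder hash; a real caller passes the hex hash of an existing block.
	blockHash := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"

	// The second argument now asks for full transactions rather than "verbose data".
	request := appmessage.NewGetBlockRequestMessage(blockHash, true)
	_ = request
}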
@@ -27,6 +27,8 @@ type GetBlockDAGInfoResponseMessage struct {
VirtualParentHashes []string
Difficulty float64
PastMedianTime int64
PruningPointHash string
VirtualDAAScore uint64

Error *RPCError
}

@@ -23,7 +23,7 @@ func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateReque
// its respective RPC message
type GetBlockTemplateResponseMessage struct {
baseMessage
MsgBlock *MsgBlock
Block *RPCBlock
IsSynced bool

Error *RPCError

@@ -35,9 +35,9 @@ func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand {
}

// NewGetBlockTemplateResponseMessage returns a instance of the message
func NewGetBlockTemplateResponseMessage(msgBlock *MsgBlock, isSynced bool) *GetBlockTemplateResponseMessage {
func NewGetBlockTemplateResponseMessage(block *RPCBlock, isSynced bool) *GetBlockTemplateResponseMessage {
return &GetBlockTemplateResponseMessage{
MsgBlock: msgBlock,
Block: block,
IsSynced: isSynced,
}
}

@@ -4,9 +4,9 @@ package appmessage
// its respective RPC message
type GetBlocksRequestMessage struct {
baseMessage
LowHash string
IncludeBlockHexes bool
IncludeBlockVerboseData bool
LowHash string
IncludeBlocks bool
IncludeTransactions bool
}

// Command returns the protocol command string for the message

@@ -15,11 +15,12 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
}

// NewGetBlocksRequestMessage returns a instance of the message
func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeBlockVerboseData bool) *GetBlocksRequestMessage {
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
includeTransactions bool) *GetBlocksRequestMessage {
return &GetBlocksRequestMessage{
LowHash: lowHash,
IncludeBlockHexes: includeBlockHexes,
IncludeBlockVerboseData: includeBlockVerboseData,
LowHash: lowHash,
IncludeBlocks: includeBlocks,
IncludeTransactions: includeTransactions,
}
}

@@ -27,9 +28,8 @@ func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeB
// its respective RPC message
type GetBlocksResponseMessage struct {
baseMessage
BlockHashes []string
BlockHexes []string
BlockVerboseData []*BlockVerboseData
BlockHashes []string
Blocks []*RPCBlock

Error *RPCError
}

@@ -40,12 +40,6 @@ func (msg *GetBlocksResponseMessage) Command() MessageCommand {
}

// NewGetBlocksResponseMessage returns a instance of the message
func NewGetBlocksResponseMessage(blockHashes []string, blockHexes []string,
blockVerboseData []*BlockVerboseData) *GetBlocksResponseMessage {

return &GetBlocksResponseMessage{
BlockHashes: blockHashes,
BlockHexes: blockHexes,
BlockVerboseData: blockVerboseData,
}
func NewGetBlocksResponseMessage() *GetBlocksResponseMessage {
return &GetBlocksResponseMessage{}
}
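GetBlocks drops the block-hex option; the request now chooses whether to include blocks and, independently, their transactions. A sketch of the updated request; the low hash value and its semantics here are placeholders, not behavior stated by the diff.

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Placeholder low hash; the exact semantics of an empty value are up to the RPC handler.
	lowHash := ""

	// includeBlocks=true, includeTransactions=false: blocks without their transaction bodies.
	request := appmessage.NewGetBlocksRequestMessage(lowHash, true, false)
	_ = request.Command()
}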
app/appmessage/rpc_get_info.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package appmessage

// GetInfoRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoRequestMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *GetInfoRequestMessage) Command() MessageCommand {
return CmdGetInfoRequestMessage
}

// NewGetInfoRequestMessage returns a instance of the message
func NewGetInfoRequestMessage() *GetInfoRequestMessage {
return &GetInfoRequestMessage{}
}

// GetInfoResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoResponseMessage struct {
baseMessage
P2PID string
MempoolSize uint64
ServerVersion string

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetInfoResponseMessage) Command() MessageCommand {
return CmdGetInfoResponseMessage
}

// NewGetInfoResponseMessage returns a instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
return &GetInfoResponseMessage{
P2PID: p2pID,
MempoolSize: mempoolSize,
ServerVersion: serverVersion,
}
}

@@ -28,8 +28,8 @@ type GetMempoolEntryResponseMessage struct {

// MempoolEntry represents a transaction in the mempool.
type MempoolEntry struct {
Fee uint64
TransactionVerboseData *TransactionVerboseData
Fee uint64
Transaction *RPCTransaction
}

// Command returns the protocol command string for the message

@@ -38,11 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
}

// NewGetMempoolEntryResponseMessage returns a instance of the message
func NewGetMempoolEntryResponseMessage(fee uint64, transactionVerboseData *TransactionVerboseData) *GetMempoolEntryResponseMessage {
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
return &GetMempoolEntryResponseMessage{
Entry: &MempoolEntry{
Fee: fee,
TransactionVerboseData: transactionVerboseData,
Fee: fee,
Transaction: transaction,
},
}
}
@@ -24,7 +24,7 @@ func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
AddedChainBlockHashes []string

Error *RPCError
}

@@ -35,11 +35,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
}

// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *GetVirtualSelectedParentChainFromBlockResponseMessage {
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {

return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
AddedChainBlockHashes: addedChainBlockHashes,
}
}
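The selected-parent-chain response is now symmetric: both the removed and the added chain blocks are reported as plain hash strings. A sketch of how a handler might build the response; the hash strings are placeholders.

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func main() {
	// Placeholder values standing in for real chain-block hashes.
	removed := []string{"hashOfReorgedBlock"}
	added := []string{"hashOfNewChainBlock1", "hashOfNewChainBlock2"}

	response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(removed, added)
	_ = response
}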
@@ -37,7 +37,7 @@ func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage {
|
||||
// its respective RPC message
|
||||
type BlockAddedNotificationMessage struct {
|
||||
baseMessage
|
||||
Block *MsgBlock
|
||||
Block *RPCBlock
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -46,7 +46,7 @@ func (msg *BlockAddedNotificationMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewBlockAddedNotificationMessage returns a instance of the message
|
||||
func NewBlockAddedNotificationMessage(block *MsgBlock) *BlockAddedNotificationMessage {
|
||||
func NewBlockAddedNotificationMessage(block *RPCBlock) *BlockAddedNotificationMessage {
|
||||
return &BlockAddedNotificationMessage{
|
||||
Block: block,
|
||||
}
|
||||
|
||||
83  app/appmessage/rpc_notify_pruning_point_utxo_set_override.go  Normal file
@@ -0,0 +1,83 @@
package appmessage

// NotifyPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideRequestMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}

// NewNotifyPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
func NewNotifyPruningPointUTXOSetOverrideRequestMessage() *NotifyPruningPointUTXOSetOverrideRequestMessage {
return &NotifyPruningPointUTXOSetOverrideRequestMessage{}
}

// NotifyPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideResponseMessage struct {
baseMessage
Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}

// NewNotifyPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
func NewNotifyPruningPointUTXOSetOverrideResponseMessage() *NotifyPruningPointUTXOSetOverrideResponseMessage {
return &NotifyPruningPointUTXOSetOverrideResponseMessage{}
}

// PruningPointUTXOSetOverrideNotificationMessage is an appmessage corresponding to
// its respective RPC message
type PruningPointUTXOSetOverrideNotificationMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *PruningPointUTXOSetOverrideNotificationMessage) Command() MessageCommand {
return CmdPruningPointUTXOSetOverrideNotificationMessage
}

// NewPruningPointUTXOSetOverrideNotificationMessage returns a instance of the message
func NewPruningPointUTXOSetOverrideNotificationMessage() *PruningPointUTXOSetOverrideNotificationMessage {
return &PruningPointUTXOSetOverrideNotificationMessage{}
}

// StopNotifyingPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}

// NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage {
return &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{}
}

// StopNotifyingPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct {
baseMessage
Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}

// NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage {
return &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{}
}
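The new pruning-point-UTXO-set-override messages above carry no payload besides the optional RPCError, so constructing them is trivial. A small sketch, assuming only the appmessage package imported below:

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func pruningPointOverrideMessages() {
	req := appmessage.NewNotifyPruningPointUTXOSetOverrideRequestMessage()
	resp := appmessage.NewNotifyPruningPointUTXOSetOverrideResponseMessage()
	notification := appmessage.NewPruningPointUTXOSetOverrideNotificationMessage()

	// Each message only carries its command constant.
	_ = req.Command()
	_ = resp.Command()
	_ = notification.Command()
}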
55  app/appmessage/rpc_notify_virtual_daa_score_changed.go  Normal file
@@ -0,0 +1,55 @@
package appmessage

// NotifyVirtualDaaScoreChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualDaaScoreChangedRequestMessage struct {
baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyVirtualDaaScoreChangedRequestMessage) Command() MessageCommand {
return CmdNotifyVirtualDaaScoreChangedRequestMessage
}

// NewNotifyVirtualDaaScoreChangedRequestMessage returns a instance of the message
func NewNotifyVirtualDaaScoreChangedRequestMessage() *NotifyVirtualDaaScoreChangedRequestMessage {
return &NotifyVirtualDaaScoreChangedRequestMessage{}
}

// NotifyVirtualDaaScoreChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualDaaScoreChangedResponseMessage struct {
baseMessage
Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyVirtualDaaScoreChangedResponseMessage) Command() MessageCommand {
return CmdNotifyVirtualDaaScoreChangedResponseMessage
}

// NewNotifyVirtualDaaScoreChangedResponseMessage returns a instance of the message
func NewNotifyVirtualDaaScoreChangedResponseMessage() *NotifyVirtualDaaScoreChangedResponseMessage {
return &NotifyVirtualDaaScoreChangedResponseMessage{}
}

// VirtualDaaScoreChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type VirtualDaaScoreChangedNotificationMessage struct {
baseMessage
VirtualDaaScore uint64
}

// Command returns the protocol command string for the message
func (msg *VirtualDaaScoreChangedNotificationMessage) Command() MessageCommand {
return CmdVirtualDaaScoreChangedNotificationMessage
}

// NewVirtualDaaScoreChangedNotificationMessage returns a instance of the message
func NewVirtualDaaScoreChangedNotificationMessage(
virtualDaaScore uint64) *VirtualDaaScoreChangedNotificationMessage {

return &VirtualDaaScoreChangedNotificationMessage{
VirtualDaaScore: virtualDaaScore,
}
}
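The DAA-score notification added above carries a single uint64. A minimal sketch of producing one:

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func daaScoreNotification(score uint64) *appmessage.VirtualDaaScoreChangedNotificationMessage {
	// The notification carries nothing but the virtual DAA score itself.
	return appmessage.NewVirtualDaaScoreChangedNotificationMessage(score)
}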
@@ -38,19 +38,7 @@ func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualS
type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}

// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}

// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTransactionIDs []string
AddedChainBlockHashes []string
}

// Command returns the protocol command string for the message
@@ -59,11 +47,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
}

// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *VirtualSelectedParentChainChangedNotificationMessage {
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {

return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
AddedChainBlockHashes: addedChainBlocks,
}
}
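After the change above, the chain-changed notification is built from plain hash slices rather than ChainBlock values. A sketch under that assumption (the hashes are placeholders):

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func chainChangedNotification() *appmessage.VirtualSelectedParentChainChangedNotificationMessage {
	removed := []string{"<removed chain block hash>"}
	added := []string{"<added chain block hash>"}
	// Both arguments are now []string; the constructor mirrors the added slice into
	// AddedChainBlockHashes, as shown in the hunk above.
	return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removed, added)
}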
37  app/appmessage/rpc_stop_notifying_utxos_changed.go  Normal file
@@ -0,0 +1,37 @@
package appmessage

// StopNotifyingUTXOsChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedRequestMessage struct {
baseMessage
Addresses []string
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedRequestMessage) Command() MessageCommand {
return CmdStopNotifyingUTXOsChangedRequestMessage
}

// NewStopNotifyingUTXOsChangedRequestMessage returns a instance of the message
func NewStopNotifyingUTXOsChangedRequestMessage(addresses []string) *StopNotifyingUTXOsChangedRequestMessage {
return &StopNotifyingUTXOsChangedRequestMessage{
Addresses: addresses,
}
}

// StopNotifyingUTXOsChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedResponseMessage struct {
baseMessage
Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedResponseMessage) Command() MessageCommand {
return CmdStopNotifyingUTXOsChangedResponseMessage
}

// NewStopNotifyingUTXOsChangedResponseMessage returns a instance of the message
func NewStopNotifyingUTXOsChangedResponseMessage() *StopNotifyingUTXOsChangedResponseMessage {
return &StopNotifyingUTXOsChangedResponseMessage{}
}
@@ -4,7 +4,8 @@ package appmessage
// its respective RPC message
type SubmitBlockRequestMessage struct {
baseMessage
Block *MsgBlock
Block *RPCBlock
AllowNonDAABlocks bool
}

// Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
}

// NewSubmitBlockRequestMessage returns a instance of the message
func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage {
return &SubmitBlockRequestMessage{
Block: block,
Block: block,
AllowNonDAABlocks: allowNonDAABlocks,
}
}

@@ -53,7 +55,51 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
return CmdSubmitBlockResponseMessage
}

// NewSubmitBlockResponseMessage returns a instance of the message
// NewSubmitBlockResponseMessage returns an instance of the message
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
return &SubmitBlockResponseMessage{}
}

// RPCBlock is a kaspad block representation meant to be
// used over RPC
type RPCBlock struct {
Header *RPCBlockHeader
Transactions []*RPCTransaction
VerboseData *RPCBlockVerboseData
}

// RPCBlockHeader is a kaspad block header representation meant to be
// used over RPC
type RPCBlockHeader struct {
Version uint32
Parents []*RPCBlockLevelParents
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Timestamp int64
Bits uint32
Nonce uint64
DAAScore uint64
BlueScore uint64
BlueWork string
PruningPoint string
}

// RPCBlockLevelParents holds parent hashes for one block level
type RPCBlockLevelParents struct {
ParentHashes []string
}

// RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct {
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
MergeSetBluesHashes []string
MergeSetRedsHashes []string
IsChainBlock bool
}
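A hypothetical sketch of the reworked SubmitBlock request built from the new RPCBlock type; every field value here is a placeholder rather than real chain data:

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func submitBlockRequest() *appmessage.SubmitBlockRequestMessage {
	block := &appmessage.RPCBlock{
		Header: &appmessage.RPCBlockHeader{
			Version:   1,
			Timestamp: 1_600_000_000_000, // milliseconds, placeholder
			Bits:      0x207fffff,        // placeholder difficulty bits
		},
	}
	// The second argument is the new AllowNonDAABlocks flag.
	return appmessage.NewSubmitBlockRequestMessage(block, false)
}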
@@ -5,6 +5,7 @@ package appmessage
type SubmitTransactionRequestMessage struct {
baseMessage
Transaction *RPCTransaction
AllowOrphan bool
}

// Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *SubmitTransactionRequestMessage) Command() MessageCommand {
}

// NewSubmitTransactionRequestMessage returns a instance of the message
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTransactionRequestMessage {
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction, allowOrphan bool) *SubmitTransactionRequestMessage {
return &SubmitTransactionRequestMessage{
Transaction: transaction,
AllowOrphan: allowOrphan,
}
}

@@ -49,8 +51,8 @@ type RPCTransaction struct {
LockTime uint64
SubnetworkID string
Gas uint64
PayloadHash string
Payload string
VerboseData *RPCTransactionVerboseData
}

// RPCTransactionInput is a kaspad transaction input representation
@@ -59,6 +61,8 @@ type RPCTransactionInput struct {
PreviousOutpoint *RPCOutpoint
SignatureScript string
Sequence uint64
SigOpCount byte
VerboseData *RPCTransactionInputVerboseData
}

// RPCScriptPublicKey is a kaspad ScriptPublicKey representation
@@ -72,6 +76,7 @@ type RPCScriptPublicKey struct {
type RPCTransactionOutput struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
VerboseData *RPCTransactionOutputVerboseData
}

// RPCOutpoint is a kaspad outpoint representation meant to be used
@@ -86,6 +91,25 @@ type RPCOutpoint struct {
type RPCUTXOEntry struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
BlockBlueScore uint64
BlockDAAScore uint64
IsCoinbase bool
}

// RPCTransactionVerboseData holds verbose data about a transaction
type RPCTransactionVerboseData struct {
TransactionID string
Hash string
Mass uint64
BlockHash string
BlockTime uint64
}

// RPCTransactionInputVerboseData holds data about a transaction input
type RPCTransactionInputVerboseData struct {
}

// RPCTransactionOutputVerboseData holds data about a transaction output
type RPCTransactionOutputVerboseData struct {
ScriptPublicKeyType string
ScriptPublicKeyAddress string
}
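A sketch of the updated SubmitTransaction request. It assumes the RPCTransaction fields not shown in this excerpt (Inputs, Outputs) keep their usual names; all values are placeholders:

package main

import "github.com/kaspanet/kaspad/app/appmessage"

func submitTransactionRequest() *appmessage.SubmitTransactionRequestMessage {
	tx := &appmessage.RPCTransaction{
		Inputs: []*appmessage.RPCTransactionInput{{
			SignatureScript: "<signature script hex>",
			Sequence:        0,
			SigOpCount:      1, // new field introduced above
		}},
		Outputs: []*appmessage.RPCTransactionOutput{{
			Amount:          100_000_000,
			ScriptPublicKey: &appmessage.RPCScriptPublicKey{},
		}},
	}
	// The second argument is the new AllowOrphan flag.
	return appmessage.NewSubmitTransactionRequestMessage(tx, true)
}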
39  app/appmessage/rpc_unban.go  Normal file
@@ -0,0 +1,39 @@
package appmessage

// UnbanRequestMessage is an appmessage corresponding to
// its respective RPC message
type UnbanRequestMessage struct {
baseMessage

IP string
}

// Command returns the protocol command string for the message
func (msg *UnbanRequestMessage) Command() MessageCommand {
return CmdUnbanRequestMessage
}

// NewUnbanRequestMessage returns an instance of the message
func NewUnbanRequestMessage(ip string) *UnbanRequestMessage {
return &UnbanRequestMessage{
IP: ip,
}
}

// UnbanResponseMessage is an appmessage corresponding to
// its respective RPC message
type UnbanResponseMessage struct {
baseMessage

Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *UnbanResponseMessage) Command() MessageCommand {
return CmdUnbanResponseMessage
}

// NewUnbanResponseMessage returns a instance of the message
func NewUnbanResponseMessage() *UnbanResponseMessage {
return &UnbanResponseMessage{}
}
@@ -4,23 +4,19 @@ import (
"fmt"
"sync/atomic"

"github.com/kaspanet/kaspad/domain/utxoindex"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"

infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"

"github.com/kaspanet/kaspad/domain"

"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"

"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/app/rpc"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/utxoindex"
"github.com/kaspanet/kaspad/infrastructure/config"
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/panics"
)

@@ -50,8 +46,6 @@ func (a *ComponentManager) Start() {
panics.Exit(log, fmt.Sprintf("Error starting the net adapter: %+v", err))
}

a.maybeSeedFromDNS()

a.connectionManager.Start()
}

@@ -72,6 +66,8 @@ func (a *ComponentManager) Stop() {
log.Errorf("Error stopping the net adapter: %+v", err)
}

a.protocolManager.Close()

return
}

@@ -80,7 +76,16 @@ func (a *ComponentManager) Stop() {
func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) (
*ComponentManager, error) {

domain, err := domain.New(cfg.ActiveNetParams, db, cfg.IsArchivalNode)
consensusConfig := consensus.Config{
Params: *cfg.ActiveNetParams,
IsArchival: cfg.IsArchivalNode,
EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet,
}
mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params)
mempoolConfig.MaximumOrphanTransactionCount = cfg.MaxOrphanTxs
mempoolConfig.MinimumRelayTransactionFee = cfg.MinRelayTxFee

domain, err := domain.New(&consensusConfig, mempoolConfig, db)
if err != nil {
return nil, err
}
@@ -90,14 +95,18 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
return nil, err
}

addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg))
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg), db)
if err != nil {
return nil, err
}

var utxoIndex *utxoindex.UTXOIndex
if cfg.UTXOIndex {
utxoIndex = utxoindex.New(domain.Consensus(), db)
utxoIndex, err = utxoindex.New(domain, db)
if err != nil {
return nil, err
}

log.Infof("UTXO index started")
}
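For readers following NewComponentManager above: consensus and mempool settings are now passed to domain.New as explicit config values. A sketch that mirrors a subset of that construction, assuming the same cfg fields used in the diff:

package main

import (
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
	"github.com/kaspanet/kaspad/infrastructure/config"
)

func buildDomainConfigs(cfg *config.Config) (*consensus.Config, *mempool.Config) {
	consensusConfig := &consensus.Config{
		Params:     *cfg.ActiveNetParams,
		IsArchival: cfg.IsArchivalNode,
	}
	// Start from the defaults for the active network and override the CLI-driven knobs.
	mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params)
	mempoolConfig.MaximumOrphanTransactionCount = cfg.MaxOrphanTxs
	mempoolConfig.MinimumRelayTransactionFee = cfg.MinRelayTxFee
	return consensusConfig, mempoolConfig
}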
@@ -143,28 +152,13 @@ func setupRPC(
utxoIndex,
shutDownChan,
)
protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)

return rpcManager
}

func (a *ComponentManager) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
// Kaspad uses a lookup of the dns seeder here. Since seeder returns
// IPs of nodes and not its own IP, we can not know real IP of
// source. So we'll take first returned address as source.
a.addressManager.AddAddresses(addresses...)
})

dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
func(addresses []*appmessage.NetAddress) {
a.addressManager.AddAddresses(addresses...)
})
}
}

// P2PNodeID returns the network ID associated with this ComponentManager
func (a *ComponentManager) P2PNodeID() *id.ID {
return a.netAdapter.ID()
57  app/db_version.go  Normal file
@@ -0,0 +1,57 @@
package app

import (
"os"
"path"
"strconv"

"github.com/pkg/errors"
)

const currentDatabaseVersion = 1

func checkDatabaseVersion(dbPath string) (err error) {
versionFileName := versionFilePath(dbPath)

versionBytes, err := os.ReadFile(versionFileName)
if err != nil {
if os.IsNotExist(err) { // If version file doesn't exist, we assume that the database is new
return createDatabaseVersionFile(dbPath, versionFileName)
}
return err
}

databaseVersion, err := strconv.Atoi(string(versionBytes))
if err != nil {
return err
}

if databaseVersion != currentDatabaseVersion {
// TODO: Once there's more then one database version, it might make sense to add upgrade logic at this point
return errors.Errorf("Invalid database version %d. Expected version: %d", databaseVersion, currentDatabaseVersion)
}

return nil
}

func createDatabaseVersionFile(dbPath string, versionFileName string) error {
err := os.MkdirAll(dbPath, 0700)
if err != nil {
return err
}

versionFile, err := os.Create(versionFileName)
if err != nil {
return nil
}
defer versionFile.Close()

versionString := strconv.Itoa(currentDatabaseVersion)
_, err = versionFile.Write([]byte(versionString))
return err
}

func versionFilePath(dbPath string) string {
dbVersionFileName := path.Join(dbPath, "version")
return dbVersionFileName
}
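A hypothetical call site for the version check above. It would live in the same app package, and dbPath is a placeholder:

// ensureDatabaseVersion is a sketch only: it creates the version file on first run and
// returns an error when the on-disk version does not match currentDatabaseVersion.
func ensureDatabaseVersion(dbPath string) error {
	return checkDatabaseVersion(dbPath)
}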
@@ -7,8 +7,6 @@ package app

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
var log = logger.RegisterSubSystem("KASD")

@@ -1,58 +0,0 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blocklogger

import (
"sync"
"time"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
)

var (
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime = mstime.Now()
mtx sync.Mutex
)

// LogBlock logs a new block blue score as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *externalapi.DomainBlock) {
mtx.Lock()
defer mtx.Unlock()

receivedLogBlocks++
receivedLogTx += int64(len(block.Transactions))

now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return
}

// Truncate the duration to 10s of milliseconds.
tDuration := duration.Round(10 * time.Millisecond)

// Log information about new block blue score.
blockStr := "blocks"
if receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if receivedLogTx == 1 {
txStr = "transaction"
}

log.Infof("Processed %d %s in the last %s (%d %s, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, mstime.UnixMilliseconds(block.Header.TimeInMilliseconds()))

receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
}

@@ -1,6 +1,8 @@
package common

import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"time"

"github.com/pkg/errors"
@@ -8,7 +10,18 @@ import (

// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 30 * time.Second
const DefaultTimeout = 120 * time.Second

// ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

type flowExecuteFunc func(peer *peerpkg.Peer)

// Flow is a a data structure that is used in order to associate a p2p flow to some route in a router.
type Flow struct {
Name string
ExecuteFunc flowExecuteFunc
}

// FlowInitializeFunc is a function that is used in order to initialize a flow
type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error
@@ -1,23 +1,23 @@
package flowcontext

import (
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"time"

peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
)

// OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcast
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
blockInsertionResult *externalapi.BlockInsertionResult) error {
virtualChangeSet *externalapi.VirtualChangeSet) error {

hash := consensushashing.BlockHash(block)
log.Debugf("OnNewBlock start for block %s", hash)
@@ -31,38 +31,54 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))

newBlocks := []*externalapi.DomainBlock{block}
newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
}

allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
for i, newBlock := range newBlocks {
blocklogger.LogBlock(block)

log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil {
return err
}
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)

if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
blockInsertionResult = newBlockInsertionResults[i]
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
virtualChangeSet = newVirtualChangeSets[i]
err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
if err != nil {
return err
}
}
}

return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
}

// OnVirtualChange calls the handler function whenever the virtual block changes.
func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
return f.onVirtualChangeHandler(virtualChangeSet)
}

return nil
}

// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
if f.onPruningPointUTXOSetOverrideHandler != nil {
return f.onPruningPointUTXOSetOverrideHandler()
}
return nil
}

func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
block *externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {

f.updateTransactionsToRebroadcast(block)
addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {

// Don't relay transactions when in IBD.
if f.IsIBDRunning() {
@@ -71,7 +87,12 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(

var txIDsToRebroadcast []*externalapi.DomainTransactionID
if f.shouldRebroadcastTransactions() {
txIDsToRebroadcast = f.txIDsToRebroadcast()
txsToRebroadcast, err := f.Domain().MiningManager().RevalidateHighPriorityTransactions()
if err != nil {
return err
}
txIDsToRebroadcast = consensushashing.TransactionIDs(txsToRebroadcast)
f.lastRebroadcastTime = time.Now()
}

txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
@@ -82,33 +103,29 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
for i, txID := range txIDsToRebroadcast {
txIDsToBroadcast[offset+i] = txID
}

if len(txIDsToBroadcast) == 0 {
return nil
}
if len(txIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
txIDsToBroadcast = txIDsToBroadcast[:appmessage.MaxInvPerTxInvMsg]
}
inv := appmessage.NewMsgInvTransaction(txIDsToBroadcast)
return f.Broadcast(inv)
return f.EnqueueTransactionIDsForPropagation(txIDsToBroadcast)
}

// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks {
return f.sharedRequestedBlocks
}

// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(false, "cannot add header only block")
}

virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
}
return err
}
err = f.OnNewBlock(block, blockInsertionResult)
err = f.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
@@ -148,7 +165,6 @@ func (f *FlowContext) UnsetIBDRunning() {
}

f.ibdPeer = nil
log.Infof("IBD finished")
}

// IBDPeer returns the current IBD peer or null if the node is not
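A sketch of a handler matching the new OnBlockAddedToDAGHandler shape above, which now receives a *VirtualChangeSet instead of a *BlockInsertionResult; the body is a placeholder:

package main

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func onBlockAdded(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
	if virtualChangeSet == nil {
		// Nothing about the virtual state changed for this block.
		return nil
	}
	// React to the added block / updated virtual state here.
	_ = block
	return nil
}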
@@ -18,14 +18,19 @@ import (
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
if !isErrRouteClosed {
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}

log.Errorf("error from %s: %+v", flowName, err)
log.Errorf("error from %s: %s", flowName, err)
}

if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
}

// IsRecoverableError returns whether the error is recoverable
func (*FlowContext) IsRecoverableError(err error) bool {
return err == nil || errors.Is(err, router.ErrRouteClosed) || errors.As(err, &protocolerrors.ProtocolError{})
}
@@ -1,16 +1,15 @@
package flowcontext

import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"

"github.com/kaspanet/kaspad/util/mstime"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

"github.com/kaspanet/kaspad/domain"

"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
@@ -21,7 +20,14 @@ import (

// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error

// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error

// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.
type OnPruningPointUTXOSetOverrideHandler func() error

// OnTransactionAddedToMempoolHandler is a handler function that's triggered
// when a transaction is added to the mempool
@@ -38,15 +44,15 @@ type FlowContext struct {

timeStarted int64

onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
onVirtualChangeHandler OnVirtualChangeHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler

transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
lastRebroadcastTime time.Time
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
lastRebroadcastTime time.Time
sharedRequestedTransactions *SharedRequestedTransactions

sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
sharedRequestedBlocks *SharedRequestedBlocks

ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex
@@ -56,6 +62,12 @@ type FlowContext struct {

orphans map[externalapi.DomainHash]*externalapi.DomainBlock
orphansMutex sync.RWMutex

transactionIDsToPropagate []*externalapi.DomainTransactionID
lastTransactionIDPropagationTime time.Time
transactionIDPropagationLock sync.Mutex

shutdownChan chan struct{}
}

// New returns a new instance of FlowContext.
@@ -63,25 +75,48 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {

return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
domain: domain,
addressManager: addressManager,
connectionManager: connectionManager,
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
cfg: cfg,
netAdapter: netAdapter,
domain: domain,
addressManager: addressManager,
connectionManager: connectionManager,
sharedRequestedTransactions: NewSharedRequestedTransactions(),
sharedRequestedBlocks: NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
transactionIDsToPropagate: []*externalapi.DomainTransactionID{},
lastTransactionIDPropagationTime: time.Now(),
shutdownChan: make(chan struct{}),
}
}

// Close signals to all flows the the protocol manager is closed.
func (f *FlowContext) Close() {
close(f.shutdownChan)
}

// ShutdownChan is a chan where flows can subscribe to shutdown
// event.
func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan
}

// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
f.onVirtualChangeHandler = onVirtualChangeHandler
}

// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
}

// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
}

// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
func (f *FlowContext) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler) {
f.onTransactionAddedToMempoolHandler = onTransactionAddedToMempoolHandler
@@ -2,8 +2,6 @@ package flowcontext

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)
var log = logger.RegisterSubSystem("PROT")

@@ -17,8 +17,8 @@ const maxOrphans = 600

// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
blockInsertionResult *externalapi.BlockInsertionResult
block *externalapi.DomainBlock
virtualChangeSet *externalapi.VirtualChangeSet
}

// AddOrphan adds the block to the orphan set
@@ -73,10 +73,10 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
orphanBlock := f.orphans[orphanHash]

log.Debugf("Considering to unorphan block %s with parents %s",
orphanHash, orphanBlock.Header.ParentHashes())
orphanHash, orphanBlock.Header.DirectParents())

canBeUnorphaned := true
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
if err != nil {
return nil, err
@@ -90,14 +90,14 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
}
}
if canBeUnorphaned {
blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
blockInsertionResult: blockInsertionResult,
block: orphanBlock,
virtualChangeSet: virtualChangeSet,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
@@ -133,7 +133,7 @@ func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.Domai
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
var childOrphans []externalapi.DomainHash
for orphanHash, orphanBlock := range f.orphans {
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
if orphanBlockParentHash.Equal(blockHash) {
childOrphans = append(childOrphans, orphanHash)
break
@@ -143,14 +143,14 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
return childOrphans
}

func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)

blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
@@ -160,7 +160,7 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externa
}

log.Infof("Unorphaned block %s", orphanHash)
return blockInsertionResult, true, nil
return virtualChangeSet, true, nil
}

// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@@ -194,11 +194,14 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa

if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
roots = append(roots, current)
} else {
log.Debugf("Block %s was skipped when checking for orphan roots: "+
"exists: %t, status: %s", current, blockInfo.Exists, blockInfo.BlockStatus)
}
continue
}

for _, parent := range block.Header.ParentHashes() {
for _, parent := range block.Header.DirectParents() {
if !addedToQueueSet.Contains(parent) {
queue = append(queue, parent)
addedToQueueSet.Add(parent)
@@ -1,4 +1,4 @@
package blockrelay
package flowcontext

import (
"sync"
@@ -13,13 +13,15 @@ type SharedRequestedBlocks struct {
sync.Mutex
}

func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
// Remove removes a block from the set.
func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) {
s.Lock()
defer s.Unlock()
delete(s.blocks, *hash)
}

func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
// RemoveSet removes a set of blocks from the set.
func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) {
s.Lock()
defer s.Unlock()
for hash := range blockHashes {
@@ -27,7 +29,8 @@ func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash
}
}

func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
// AddIfNotExists adds a block to the set if it doesn't exist yet.
func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.blocks[*hash]
@@ -1,4 +1,4 @@
package transactionrelay
package flowcontext

import (
"sync"
@@ -13,13 +13,15 @@ type SharedRequestedTransactions struct {
sync.Mutex
}

func (s *SharedRequestedTransactions) remove(txID *externalapi.DomainTransactionID) {
// Remove removes a transaction from the set.
func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) {
s.Lock()
defer s.Unlock()
delete(s.transactions, *txID)
}

func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
// RemoveMany removes a set of transactions from the set.
func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) {
s.Lock()
defer s.Unlock()
for _, txID := range txIDs {
@@ -27,7 +29,8 @@ func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTran
}
}

func (s *SharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
// AddIfNotExists adds a transaction to the set if it doesn't exist yet.
func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.transactions[*txID]
@@ -3,7 +3,7 @@ package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"

const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 300_000
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)

// ShouldMine returns whether it's ok to use block template from this node
@@ -25,6 +25,10 @@ func (f *FlowContext) ShouldMine() (bool, error) {
return false, err
}

if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
return false, nil
}

virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
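A sketch of the now-exported SharedRequestedBlocks API after its move into flowcontext; it assumes the flowcontext package path shown in the hunks and uses a placeholder hash:

package main

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func trackRequestedBlock(shared *flowcontext.SharedRequestedBlocks, hash *externalapi.DomainHash) {
	if alreadyRequested := shared.AddIfNotExists(hash); alreadyRequested {
		// Another peer flow already asked for this block.
		return
	}
	// ... request the block from the peer; if the request fails, release the reservation:
	shared.Remove(hash)
}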
@@ -4,36 +4,22 @@ import (
"time"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)

// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// TransactionIDPropagationInterval is the interval between transaction IDs propagations
const TransactionIDPropagationInterval = 500 * time.Millisecond

err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, false)
// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error {
acceptedTransactions, err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, true, allowOrphan)
if err != nil {
return err
}

transactionID := consensushashing.TransactionID(tx)
f.transactionsToRebroadcast[*transactionID] = tx
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
return f.Broadcast(inv)
}

func (f *FlowContext) updateTransactionsToRebroadcast(block *externalapi.DomainBlock) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// Note: if the block is red, its transactions won't be rebroadcasted
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
}
acceptedTransactionIDs := consensushashing.TransactionIDs(acceptedTransactions)
return f.EnqueueTransactionIDsForPropagation(acceptedTransactionIDs)
}

func (f *FlowContext) shouldRebroadcastTransactions() bool {
@@ -41,22 +27,9 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}

func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()

txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = consensushashing.TransactionID(tx)
i++
}
return txIDs
}

// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions {
return f.sharedRequestedTransactions
}

@@ -67,3 +40,42 @@ func (f *FlowContext) OnTransactionAddedToMempool() {
f.onTransactionAddedToMempoolHandler()
}
}

// EnqueueTransactionIDsForPropagation add the given transactions IDs to a set of IDs to
// propagate. The IDs will be broadcast to all peers within a single transaction Inv message.
// The broadcast itself may happen only during a subsequent call to this method
func (f *FlowContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
f.transactionIDPropagationLock.Lock()
defer f.transactionIDPropagationLock.Unlock()

f.transactionIDsToPropagate = append(f.transactionIDsToPropagate, transactionIDs...)

return f.maybePropagateTransactions()
}

func (f *FlowContext) maybePropagateTransactions() error {
if time.Since(f.lastTransactionIDPropagationTime) < TransactionIDPropagationInterval &&
len(f.transactionIDsToPropagate) < appmessage.MaxInvPerTxInvMsg {
return nil
}

for len(f.transactionIDsToPropagate) > 0 {
transactionIDsToBroadcast := f.transactionIDsToPropagate
if len(transactionIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
transactionIDsToBroadcast = f.transactionIDsToPropagate[:len(transactionIDsToBroadcast)]
}
log.Debugf("Transaction propagation: broadcasting %d transactions", len(transactionIDsToBroadcast))

inv := appmessage.NewMsgInvTransaction(transactionIDsToBroadcast)
err := f.Broadcast(inv)
if err != nil {
return err
}

f.transactionIDsToPropagate = f.transactionIDsToPropagate[len(transactionIDsToBroadcast):]
}

f.lastTransactionIDPropagationTime = time.Now()

return nil
}
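A sketch of feeding accepted transaction IDs into the new batched propagation path; the FlowContext value and the IDs come from elsewhere and are placeholders here:

package main

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func propagateAccepted(f *flowcontext.FlowContext, ids []*externalapi.DomainTransactionID) error {
	// IDs are queued and flushed as a single inv message once 500ms have passed or the
	// batch reaches appmessage.MaxInvPerTxInvMsg, per maybePropagateTransactions above.
	return f.EnqueueTransactionIDsForPropagation(ids)
}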
@@ -1,29 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, limit uint32) error {
|
||||
|
||||
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
|
||||
if !ok {
|
||||
return nil,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
|
||||
}
|
||||
return msgBlockLocator.BlockLocatorHashes, nil
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandlePruningPointHashRequestsFlowContext is the interface for the context needed for the handlePruningPointHashRequestsFlow flow.
|
||||
type HandlePruningPointHashRequestsFlowContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handlePruningPointHashRequestsFlow struct {
|
||||
HandlePruningPointHashRequestsFlowContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandlePruningPointHashRequests listens to appmessage.MsgRequestPruningPointHashMessage messages and sends
|
||||
// the pruning point hash as response.
|
||||
func HandlePruningPointHashRequests(context HandlePruningPointHashRequestsFlowContext, incomingRoute,
|
||||
outgoingRoute *router.Route) error {
|
||||
flow := &handlePruningPointHashRequestsFlow{
|
||||
HandlePruningPointHashRequestsFlowContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handlePruningPointHashRequestsFlow) start() error {
|
||||
for {
|
||||
_, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Got request for a pruning point hash")
|
||||
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewPruningPointHashMessage(pruningPoint))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Sent pruning point hash %s", pruningPoint)
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleRequestPruningPointUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestPruningPointUTXOSetAndBlock flow.
|
||||
type HandleRequestPruningPointUTXOSetAndBlockContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestPruningPointUTXOSetAndBlockFlow struct {
|
||||
HandleRequestPruningPointUTXOSetAndBlockContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandleRequestPruningPointUTXOSetAndBlock listens to appmessage.MsgRequestPruningPointUTXOSetAndBlock messages and sends
|
||||
// the pruning point UTXO set and block body.
|
||||
func HandleRequestPruningPointUTXOSetAndBlock(context HandleRequestPruningPointUTXOSetAndBlockContext, incomingRoute,
|
||||
outgoingRoute *router.Route) error {
|
||||
flow := &handleRequestPruningPointUTXOSetAndBlockFlow{
|
||||
HandleRequestPruningPointUTXOSetAndBlockContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) start() error {
|
||||
for {
|
||||
msgRequestPruningPointUTXOSetAndBlock, err := flow.waitForRequestPruningPointUTXOSetAndBlockMessages()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.handleRequestPruningPointUTXOSetAndBlockMessage(msgRequestPruningPointUTXOSetAndBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) handleRequestPruningPointUTXOSetAndBlockMessage(
|
||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetAndBlockFlow")
|
||||
defer onEnd()
|
||||
|
||||
log.Debugf("Got request for PruningPointHash UTXOSet and Block")
|
||||
|
||||
err := flow.sendPruningPointBlock(msgRequestPruningPointUTXOSetAndBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSetAndBlock)
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) waitForRequestPruningPointUTXOSetAndBlockMessages() (
|
||||
*appmessage.MsgRequestPruningPointUTXOSetAndBlock, error) {
|
||||
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgRequestPruningPointUTXOSetAndBlock, ok := message.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSetAndBlock, message.Command())
|
||||
}
|
||||
return msgRequestPruningPointUTXOSetAndBlock, nil
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointBlock(
|
||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
||||
|
||||
block, err := flow.Domain().Consensus().GetBlock(msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Retrieved pruning block %s", msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
||||
|
||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(block)))
|
||||
}
|
||||
|
||||
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointUTXOSet(
|
||||
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
|
||||
|
||||
// Send the UTXO set in `step`-sized chunks
|
||||
const step = 1000
|
||||
var fromOutpoint *externalapi.DomainOutpoint
|
||||
chunksSent := 0
|
||||
for {
|
||||
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
return err
}
|
||||
|
||||
log.Debugf("Retrieved %d UTXOs for pruning block %s",
|
||||
len(pruningPointUTXOs), msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
||||
|
||||
outpointAndUTXOEntryPairs :=
|
||||
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(pruningPointUTXOs) < step {
|
||||
log.Debugf("Finished sending UTXOs for pruning block %s",
|
||||
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
|
||||
|
||||
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
|
||||
}
|
||||
|
||||
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
|
||||
chunksSent++
|
||||
|
||||
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
|
||||
if chunksSent%ibdBatchSize == 0 {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
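The chunking logic above can be summarized with a small standalone sketch: page through the UTXO set `step` entries at a time, stop on the first short page, and pause every few chunks to wait for the peer's request-next message. The `fetchPage` helper and the small `ibdBatchSize` value are illustrative stand-ins, not kaspad APIs.

```go
package main

import "fmt"

// fetchPage stands in for GetPruningPointUTXOs: return up to limit items
// starting at offset from (the real call pages by outpoint, not index).
func fetchPage(all []int, from, limit int) []int {
	if from >= len(all) {
		return nil
	}
	end := from + limit
	if end > len(all) {
		end = len(all)
	}
	return all[from:end]
}

func main() {
	const step = 1000      // chunk size, as in sendPruningPointUTXOSet
	const ibdBatchSize = 2 // illustrative only; the real constant lives in blockrelay

	all := make([]int, 2500)
	from, chunksSent := 0, 0
	for {
		chunk := fetchPage(all, from, step)
		fmt.Printf("sending chunk of %d UTXOs\n", len(chunk))
		if len(chunk) < step {
			fmt.Println("sending MsgDonePruningPointUTXOSetChunks")
			return
		}
		from += len(chunk)
		chunksSent++
		// Every ibdBatchSize chunks, stop and wait for the peer to request more,
		// which bounds the amount of unacknowledged data in flight.
		if chunksSent%ibdBatchSize == 0 {
			fmt.Println("waiting for MsgRequestNextPruningPointUTXOSetChunk")
		}
	}
}
```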
|
||||
@@ -1,534 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
|
||||
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
|
||||
if !wasIBDNotRunning {
|
||||
log.Debugf("IBD is already running")
|
||||
return nil
|
||||
}
|
||||
defer flow.UnsetIBDRunning()
|
||||
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
|
||||
log.Debugf("Syncing headers up to %s", highHash)
|
||||
err := flow.syncHeaders(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Finished syncing headers up to %s", highHash)
|
||||
|
||||
log.Debugf("Syncing the current pruning point UTXO set")
|
||||
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !syncedPruningPointUTXOSetSuccessfully {
|
||||
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Finished syncing the current pruning point UTXO set")
|
||||
|
||||
log.Debugf("Downloading block bodies up to %s", highHash)
|
||||
err = flow.syncMissingBlockBodies(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Finished downloading block bodies up to %s", highHash)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) error {
|
||||
highHashReceived := false
|
||||
for !highHashReceived {
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We're finished once highHash has been inserted into the DAG
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
highHashReceived = blockInfo.Exists
|
||||
log.Debugf("Headers downloaded from peer %s. Are further headers required: %t", flow.peer, !highHashReceived)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(targetHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
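The locator negotiation above is essentially a coarse-to-fine search: the syncer offers exponentially spaced hashes, the peer answers with the highest one it knows, and the range is then narrowed between that hash and the entry just above it. The toy sketch below runs the same loop over integer "heights" (all names hypothetical) and shows why it terminates, including the two-entry stop condition.

```go
package main

import "fmt"

// makeLocator mimics a block locator: heights from high down to low with
// exponentially growing gaps.
func makeLocator(low, high int) []int {
	locator := []int{}
	step := 1
	for h := high; h > low; h -= step {
		locator = append(locator, h)
		step *= 2
	}
	return append(locator, low)
}

func main() {
	peerTip := 7_312 // highest height the (hypothetical) peer shares with us
	low, high := 0, 100_000

	for {
		locator := makeLocator(low, high)
		// The peer answers with the highest locator entry it knows.
		highestIndex := 0
		for i, h := range locator {
			if h <= peerTip {
				highestIndex = i
				break
			}
		}
		highest := locator[highestIndex]
		fmt.Printf("peer's highest known locator entry: height %d (index %d)\n", highest, highestIndex)

		// Same stop conditions as in findHighestSharedBlockHash above.
		if highestIndex == 0 || (len(locator) == 2 && highestIndex == 1) {
			fmt.Println("highest shared block height:", highest)
			return
		}
		// Zoom in between the highest known entry and the entry just above it.
		low, high = highest, locator[highestIndex-1]
	}
}
```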
|
||||
|
||||
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||
if err != nil {
|
||||
if !errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
|
||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
||||
"restarting with full block locator")
|
||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return blockLocator, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
}
|
||||
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash) error {
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Keep a short queue of blockHeadersMessages so that there's
|
||||
// never a moment when the node is not validating and inserting
|
||||
// headers
|
||||
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
|
||||
errChan := make(chan error)
|
||||
doneChan := make(chan interface{})
|
||||
spawn("handleRelayInvsFlow-downloadHeaders", func() {
|
||||
for {
|
||||
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if doneIBD {
|
||||
doneChan <- struct{}{}
|
||||
return
|
||||
}
|
||||
|
||||
blockHeadersMessageChan <- blockHeadersMessage
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
for {
|
||||
select {
|
||||
case blockHeadersMessage := <-blockHeadersMessageChan:
|
||||
for _, header := range blockHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-doneChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
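The header download is a small producer/consumer pipeline: one goroutine keeps the network busy requesting and receiving header batches while the main loop validates and inserts them, with a capacity-2 channel acting as the "short queue" mentioned in the comment. A self-contained sketch of the same shape, with channels and strings standing in for routes and headers:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	headersChan := make(chan []string, 2) // short queue, as in downloadHeaders
	errChan := make(chan error)
	doneChan := make(chan struct{})

	// Producer: fetch batches over the "network" and hand them to the consumer.
	go func() {
		for batch := 0; batch < 5; batch++ {
			time.Sleep(10 * time.Millisecond) // stands in for a network round trip
			headersChan <- []string{fmt.Sprintf("header-batch-%d", batch)}
		}
		doneChan <- struct{}{}
	}()

	for {
		select {
		case headers := <-headersChan:
			fmt.Println("processing", headers) // stands in for header validation
		case err := <-errChan:
			fmt.Println("download failed:", err)
			return
		case <-doneChan:
			// Drain anything still buffered before finishing; the real flow
			// relies on protocol ordering for this instead.
			for len(headersChan) > 0 {
				fmt.Println("processing", <-headersChan)
			}
			fmt.Println("all headers processed")
			return
		}
	}
}
```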
|
||||
|
||||
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneIBD bool, err error) {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.BlockHeadersMessage:
|
||||
return message, false, nil
|
||||
case *appmessage.MsgDoneHeaders:
|
||||
return nil, true, nil
|
||||
default:
|
||||
return nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
|
||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
||||
block := &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: nil,
|
||||
}
|
||||
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists {
|
||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||
return nil
|
||||
}
|
||||
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||
}
|
||||
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
|
||||
} else {
|
||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
||||
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
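processHeader distinguishes three outcomes: failures that are not rule errors abort IBD, duplicate blocks are skipped, and any other rule error is treated as a protocol violation by the peer. A small sketch of that classification, with a hypothetical ruleError type standing in for ruleerrors.RuleError:

```go
package main

import (
	"errors"
	"fmt"
)

// ruleError is a stand-in for ruleerrors.RuleError: "the peer sent bad data",
// as opposed to a local failure.
type ruleError struct{ msg string }

func (e ruleError) Error() string { return e.msg }

var errDuplicateBlock = ruleError{msg: "duplicate block"}

func classify(err error) string {
	if err == nil {
		return "accepted"
	}
	// Anything that is not a rule error is a local failure and aborts IBD.
	if !errors.As(err, &ruleError{}) {
		return "internal failure: " + err.Error()
	}
	// Duplicates are benign; any other rule error gets the peer banned.
	if errors.Is(err, errDuplicateBlock) {
		return "skipped duplicate"
	}
	return "rejected, ban peer: " + err.Error()
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(fmt.Errorf("database: %w", errors.New("disk full"))))
	fmt.Println(classify(fmt.Errorf("validation: %w", errDuplicateBlock)))
	fmt.Println(classify(ruleError{msg: "bad merkle root"}))
}
```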
|
||||
|
||||
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet() (bool, error) {
|
||||
log.Debugf("Checking if a new pruning point is available")
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointHashMessage())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
msgPruningPointHash, ok := message.(*appmessage.MsgPruningPointHashMessage)
|
||||
if !ok {
|
||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdPruningPointHash, message.Command())
|
||||
}
|
||||
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgPruningPointHash.Hash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !blockInfo.Exists {
|
||||
return false, errors.Errorf("The pruning point header is missing")
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
log.Debugf("Already has the block data of the new suggested pruning point %s", msgPruningPointHash.Hash)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgPruningPointHash.Hash)
|
||||
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgPruningPointHash.Hash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !isValid {
|
||||
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
|
||||
" peer", msgPruningPointHash.Hash)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("Fetching the pruning point UTXO set")
|
||||
succeed, err := flow.fetchMissingUTXOSet(msgPruningPointHash.Hash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !succeed {
|
||||
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("Fetched the new pruning point UTXO set")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
|
||||
defer func() {
|
||||
err := flow.Domain().Consensus().ClearImportedPruningPointData()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
|
||||
}
|
||||
}()
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
block, err := flow.receivePruningPointBlock()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(pruningPointHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !receivedAll {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
err = flow.Domain().Consensus().ValidateAndInsertImportedPruningPoint(block)
|
||||
if err != nil {
|
||||
// TODO: Find a better way to deal with finality conflicts.
|
||||
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
|
||||
return false, nil
|
||||
}
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
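fetchMissingUTXOSet always clears the staged pruning point data on exit via defer, whether the import succeeded or not, and treats a failed cleanup as unrecoverable. A minimal sketch of that pattern with hypothetical stage/commit/clear callbacks:

```go
package main

import "fmt"

// importStagedData mirrors the shape of fetchMissingUTXOSet: the deferred
// cleanup runs on every exit path, and a cleanup failure panics.
func importStagedData(stage, commit, clear func() error) error {
	defer func() {
		if clearErr := clear(); clearErr != nil {
			panic(fmt.Sprintf("failed to clear staged pruning point data: %s", clearErr))
		}
	}()

	if err := stage(); err != nil {
		return err
	}
	return commit()
}

func main() {
	err := importStagedData(
		func() error { fmt.Println("staging UTXO chunks"); return nil },
		func() error { fmt.Println("validating and committing"); return nil },
		func() error { fmt.Println("clearing staged data"); return nil },
	)
	fmt.Println("import finished, err =", err)
}
```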
|
||||
|
||||
func (flow *handleRelayInvsFlow) receivePruningPointBlock() (*externalapi.DomainBlock, error) {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receivePruningPointBlock")
|
||||
defer onEnd()
|
||||
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ibdBlockMessage, ok := message.(*appmessage.MsgIBDBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
||||
}
|
||||
block := appmessage.MsgBlockToDomainBlock(ibdBlockMessage.MsgBlock)
|
||||
|
||||
log.Debugf("Received pruning point block %s", consensushashing.BlockHash(block))
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
|
||||
pruningPointHash *externalapi.DomainHash) (bool, error) {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
|
||||
defer onEnd()
|
||||
|
||||
receivedChunkCount := 0
|
||||
receivedUTXOCount := 0
|
||||
for {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgPruningPointUTXOSetChunk:
|
||||
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
|
||||
domainOutpointAndUTXOEntryPairs :=
|
||||
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
|
||||
|
||||
err := flow.Domain().Consensus().AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
receivedChunkCount++
|
||||
if receivedChunkCount%ibdBatchSize == 0 {
|
||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
receivedChunkCount, receivedUTXOCount)
|
||||
|
||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
||||
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
case *appmessage.MsgDonePruningPointUTXOSetChunks:
|
||||
log.Debugf("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
|
||||
return true, nil
|
||||
|
||||
case *appmessage.MsgUnexpectedPruningPoint:
|
||||
log.Debugf("Could not receive the next UTXO chunk because the pruning point %s "+
|
||||
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
|
||||
return false, nil
|
||||
|
||||
default:
|
||||
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
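The receive side mirrors the sender's flow control: chunks are appended as they arrive and a request-next message goes out every ibdBatchSize chunks. A compact sketch, with a channel of chunk sizes standing in for the incoming route and an illustrative batch size:

```go
package main

import "fmt"

func main() {
	const ibdBatchSize = 3 // illustrative value only

	// Chunk sizes as they arrive; 0 stands in for MsgDonePruningPointUTXOSetChunks.
	incoming := make(chan int, 16)
	for _, n := range []int{1000, 1000, 1000, 1000, 700, 0} {
		incoming <- n
	}

	receivedChunks, receivedUTXOs := 0, 0
	for size := range incoming {
		if size == 0 {
			fmt.Printf("finished: %d UTXOs in %d chunks\n", receivedUTXOs, receivedChunks)
			return
		}
		receivedUTXOs += size
		receivedChunks++
		// Ask the peer for the next batch at the same cadence the sender pauses.
		if receivedChunks%ibdBatchSize == 0 {
			fmt.Println("sending MsgRequestNextPruningPointUTXOSetChunk")
		}
	}
}
```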
|
||||
|
||||
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
|
||||
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
if offset+ibdBatchSize < len(hashes) {
|
||||
hashesToRequest = hashes[offset : offset+ibdBatchSize]
|
||||
} else {
|
||||
hashesToRequest = hashes[offset:]
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, expectedHash := range hashesToRequest {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
if !expectedHash.Equal(blockHash) {
|
||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
||||
}
|
||||
|
||||
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
|
||||
continue
|
||||
}
|
||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
||||
}
|
||||
err = flow.OnNewBlock(block, blockInsertionResult)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
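Block bodies are requested in ibdBatchSize-sized groups; the only subtlety is clamping the final slice so it does not run past the end. The same arithmetic in isolation (values illustrative):

```go
package main

import "fmt"

func main() {
	const ibdBatchSize = 4 // illustrative value only
	hashes := make([]int, 10)
	for i := range hashes {
		hashes[i] = i
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		end := offset + ibdBatchSize
		if end > len(hashes) {
			end = len(hashes) // clamp the last, shorter batch
		}
		fmt.Println("requesting blocks", hashes[offset:end])
	}
}
```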
|
||||
|
||||
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
|
||||
// IBD. Inv messages are expected to arrive at any given moment, but should be
|
||||
// ignored while we're in IBD
|
||||
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
|
||||
for {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
|
||||
return message, nil
|
||||
}
|
||||
}
|
||||
}
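The helper above is just a filter on top of DequeueWithTimeout. A standalone sketch using a channel and a hypothetical inv type, where the timeout applies to each individual dequeue, as it does on the real route:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type inv struct{ hash string }

// dequeueSkippingInvs keeps reading until something other than an inv arrives.
func dequeueSkippingInvs(incoming <-chan interface{}, timeout time.Duration) (interface{}, error) {
	for {
		select {
		case message := <-incoming:
			if _, isInv := message.(inv); isInv {
				continue // invs are ignored for the duration of IBD
			}
			return message, nil
		case <-time.After(timeout):
			return nil, errors.New("timeout waiting for message")
		}
	}
}

func main() {
	incoming := make(chan interface{}, 3)
	incoming <- inv{hash: "aa"}
	incoming <- inv{hash: "bb"}
	incoming <- "MsgDoneHeaders"

	msg, err := dequeueSkippingInvs(incoming, time.Second)
	fmt.Println(msg, err)
}
```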
|
||||
@@ -28,7 +28,7 @@ type HandleHandshakeContext interface {
|
||||
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
|
||||
}
|
||||
|
||||
// HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming
|
||||
// HandleHandshake sets up the new_handshake protocol - It sends a version message and waits for an incoming
|
||||
// version message, as well as a verack for the sent version
|
||||
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
|
||||
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
||||
@@ -89,13 +89,16 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
|
||||
}
|
||||
|
||||
if peerAddress != nil {
|
||||
context.AddressManager().AddAddresses(peerAddress)
|
||||
err := context.AddressManager().AddAddresses(peerAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return peer, nil
|
||||
}
|
||||
|
||||
// Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
||||
// Therefore we implement a separate handleError for handshake
|
||||
// Therefore we implement a separate handleError for new_handshake
|
||||
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
|
||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
@@ -104,7 +107,7 @@ func handleError(err error, flowName string, isStopping *uint32, errChan chan er
|
||||
return
|
||||
}
|
||||
|
||||
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
log.Errorf("Handshake protocol error from %s: %s", flowName, err)
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
errChan <- err
|
||||
|
||||
@@ -5,5 +5,5 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -17,7 +18,9 @@ var (
|
||||
|
||||
// minAcceptableProtocolVersion is the lowest protocol version that a
|
||||
// connected peer may support.
|
||||
minAcceptableProtocolVersion = appmessage.ProtocolVersion
|
||||
minAcceptableProtocolVersion = uint32(4)
|
||||
|
||||
maxAcceptableProtocolVersion = uint32(4)
|
||||
)
|
||||
|
||||
type receiveVersionFlow struct {
|
||||
@@ -60,7 +63,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
||||
}
|
||||
|
||||
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
|
||||
return nil, protocolerrors.New(true, "connected to self")
|
||||
return nil, protocolerrors.New(false, "connected to self")
|
||||
}
|
||||
|
||||
// Disconnect and ban peers from a different network
|
||||
@@ -97,7 +100,12 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
||||
return nil, protocolerrors.New(false, "incompatible subnetworks")
|
||||
}
|
||||
|
||||
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
|
||||
if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion {
|
||||
return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion)
|
||||
}
|
||||
|
||||
maxProtocolVersion := flow.Config().ProtocolVersion
|
||||
flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion)
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -56,15 +57,18 @@ func (flow *sendVersionFlow) start() error {
|
||||
// Version message.
|
||||
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
|
||||
subnetworkID := flow.Config().SubnetworkID
|
||||
if flow.Config().ProtocolVersion < minAcceptableProtocolVersion {
|
||||
return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion)
|
||||
}
|
||||
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
|
||||
flow.Config().ActiveNetParams.Name, subnetworkID)
|
||||
flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion)
|
||||
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
|
||||
|
||||
// Advertise the services flag
|
||||
msg.Services = defaultServices
|
||||
|
||||
// Advertise our max supported protocol version.
|
||||
msg.ProtocolVersion = appmessage.ProtocolVersion
|
||||
msg.ProtocolVersion = flow.Config().ProtocolVersion
|
||||
|
||||
// Advertise if inv messages for transactions are desired.
|
||||
msg.DisableRelayTx = flow.Config().BlocksOnly
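Both version flows bound the configured protocol version: sendVersion rejects anything below minAcceptableProtocolVersion, and receiveVersion rejects a configured version above maxAcceptableProtocolVersion. A small sketch of those checks, with the error wording taken from the code above and the constants as set in this patch:

```go
package main

import "fmt"

const (
	minAcceptableProtocolVersion = uint32(4)
	maxAcceptableProtocolVersion = uint32(4)
)

// checkConfiguredVersion combines the bounds enforced by the send and receive
// version flows into one place for illustration.
func checkConfiguredVersion(configured uint32) error {
	if configured < minAcceptableProtocolVersion {
		return fmt.Errorf("configured protocol version %d is obsolete", configured)
	}
	if configured > maxAcceptableProtocolVersion {
		return fmt.Errorf("%d is a non existing protocol version", configured)
	}
	return nil
}

func main() {
	for _, v := range []uint32{3, 4, 5} {
		fmt.Printf("version %d: %v\n", v, checkConfiguredVersion(v))
	}
}
```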
|
||||
|
||||
9
app/protocol/flows/ready/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package ready
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
56
app/protocol/flows/ready/ready.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package ready
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"sync/atomic"
|
||||
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HandleReady notifies the other peer that this peer is ready for messages, and waits for the other peer
// to send a ready message before starting to run the flows.
|
||||
func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
|
||||
peer *peerpkg.Peer,
|
||||
) error {
|
||||
|
||||
log.Debugf("Sending ready message to %s", peer)
|
||||
|
||||
isStopping := uint32(0)
|
||||
err := outgoingRoute.Enqueue(appmessage.NewMsgReady())
|
||||
if err != nil {
|
||||
return handleError(err, "HandleReady", &isStopping)
|
||||
}
|
||||
|
||||
_, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return handleError(err, "HandleReady", &isStopping)
|
||||
}
|
||||
|
||||
log.Debugf("Got ready message from %s", peer)
|
||||
|
||||
return nil
|
||||
}
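HandleReady is a symmetric exchange: send MsgReady, then wait (with a timeout) for the peer's MsgReady before the other flows are started. A toy version with channels standing in for the routes:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// handleReady sends our ready signal and waits for the peer's, bounded by timeout.
func handleReady(outgoing chan<- string, incoming <-chan string, timeout time.Duration) error {
	outgoing <- "MsgReady"
	select {
	case <-incoming:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for the peer's ready message")
	}
}

func main() {
	outgoing := make(chan string, 1)
	incoming := make(chan string, 1)
	incoming <- "MsgReady" // the peer is ready too

	if err := handleReady(outgoing, incoming, time.Second); err != nil {
		fmt.Println("handshake failed:", err)
		return
	}
	fmt.Println("both sides ready, flows can start")
}
```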
|
||||
|
||||
// Ready is different from other flows, since in it should forward router.ErrRouteClosed to errChan
|
||||
// Therefore we implement a separate handleError for 'ready'
|
||||
func handleError(err error, flowName string, isStopping *uint32) error {
|
||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
log.Errorf("Ready protocol error from %s: %s", flowName, err)
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
panic(err)
|
||||
}
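Both this handleError and the handshake one match protocol errors with errors.As on a value type, and use an atomic counter so that only the first failing flow propagates the error. A self-contained sketch with a hypothetical protocolError type shaped like protocolerrors.ProtocolError:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// protocolError mimics protocolerrors.ProtocolError for this sketch: a value
// type implementing error, matched with errors.As on a pointer to a zero value.
type protocolError struct {
	ShouldBan bool
	msg       string
}

func (e protocolError) Error() string { return e.msg }

func handleError(err error, flowName string, isStopping *uint32) error {
	if protocolErr := (protocolError{}); errors.As(err, &protocolErr) {
		fmt.Printf("%s protocol error (ban=%t): %s\n", flowName, protocolErr.ShouldBan, err)
		// Only the first caller to flip the counter reports the error.
		if atomic.AddUint32(isStopping, 1) == 1 {
			return err
		}
		return nil
	}
	panic(err)
}

func main() {
	isStopping := uint32(0)
	wrapped := fmt.Errorf("ready flow: %w", protocolError{ShouldBan: true, msg: "unexpected message"})
	fmt.Println("first caller gets:", handleError(wrapped, "HandleReady", &isStopping))
	fmt.Println("second caller gets:", handleError(wrapped, "HandleReady", &isStopping))
}
```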
|
||||
@@ -1,986 +0,0 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var orphanBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{unknownBlockHash},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var validPruningPointBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var invalidPruningPointBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var unexpectedIBDBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var invalidBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var unknownBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
|
||||
var knownInvalidBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})
|
||||
var validPruningPointHash = consensushashing.BlockHash(validPruningPointBlock)
|
||||
var invalidBlockHash = consensushashing.BlockHash(invalidBlock)
|
||||
var invalidPruningPointHash = consensushashing.BlockHash(invalidPruningPointBlock)
|
||||
var orphanBlockHash = consensushashing.BlockHash(orphanBlock)
|
||||
|
||||
var fakeRelayInvsContextMap = map[externalapi.DomainHash]*externalapi.BlockInfo{
|
||||
*knownInvalidBlockHash: {
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusInvalid,
|
||||
},
|
||||
*validPruningPointHash: {
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
},
|
||||
*invalidPruningPointHash: {
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
},
|
||||
}
|
||||
|
||||
type fakeRelayInvsContext struct {
|
||||
params *dagconfig.Params
|
||||
askedOrphanBlockInfo bool
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ValidateAndInsertBlock(block *externalapi.DomainBlock) (*externalapi.BlockInsertionResult, error) {
|
||||
hash := consensushashing.BlockHash(block)
|
||||
if hash.Equal(orphanBlockHash) {
|
||||
return nil, ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
}
|
||||
|
||||
if hash.Equal(invalidBlockHash) {
|
||||
return nil, ruleerrors.ErrBadMerkleRoot
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ValidateTransactionAndPopulateWithConsensusData(transaction *externalapi.DomainTransaction) error {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlock(blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
|
||||
if info, ok := fakeRelayInvsContextMap[*blockHash]; ok {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// The second time we're asked about the orphan block is at the end of IBD,
// when the flow checks whether IBD has finished. Since we don't actually
// process headers here, we report that the orphan exists on that second
// query to signal that IBD is done.
|
||||
if blockHash.Equal(orphanBlockHash) {
|
||||
if f.askedOrphanBlockInfo {
|
||||
return &externalapi.BlockInfo{Exists: true}, nil
|
||||
}
|
||||
f.askedOrphanBlockInfo = true
|
||||
}
|
||||
|
||||
return &externalapi.BlockInfo{
|
||||
Exists: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) ([]*externalapi.DomainHash, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
// This is done so we can test getting invalid block during IBD.
|
||||
return []*externalapi.DomainHash{invalidBlockHash}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.DomainHash, fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) PruningPoint() (*externalapi.DomainHash, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ClearImportedPruningPointData() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
|
||||
if consensushashing.BlockHash(newPruningPoint).Equal(invalidPruningPointHash) {
|
||||
return ruleerrors.ErrBadMerkleRoot
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetVirtualSelectedParent() (*externalapi.DomainHash, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) (externalapi.BlockLocator, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetSyncInfo() (*externalapi.SyncInfo, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Tips() ([]*externalapi.DomainHash, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetVirtualSelectedParentChainFromBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetHeadersSelectedTip() (*externalapi.DomainHash, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) MiningManager() miningmanager.MiningManager {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Consensus() externalapi.Consensus {
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Domain() domain.Domain {
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Config() *config.Config {
|
||||
return &config.Config{
|
||||
Flags: &config.Flags{
|
||||
NetworkFlags: config.NetworkFlags{
|
||||
ActiveNetParams: f.params,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
|
||||
return blockrelay.NewSharedRequestedBlocks()
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Broadcast(message appmessage.Message) error {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsIBDRunning() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) UnsetIBDRunning() {
|
||||
}
|
||||
|
||||
func TestHandleRelayInvsErrors(t *testing.T) {
|
||||
triggerIBD := func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestBlockLocator)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
funcToExecute func(incomingRoute, outgoingRoute *router.Route)
|
||||
expectsProtocolError bool
|
||||
expectsBan bool
|
||||
expectsErrToContain string
|
||||
}{
|
||||
{
|
||||
name: "sending unexpected message type",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "message in the block relay handleRelayInvsFlow while expecting an inv message",
|
||||
},
|
||||
{
|
||||
name: "sending a known invalid inv",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "sent inv of an invalid block",
|
||||
},
|
||||
{
|
||||
name: "sending unrequested block",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(unknownBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlock(&appmessage.MsgBlockHeader{
|
||||
Version: 0,
|
||||
ParentHashes: nil,
|
||||
HashMerkleRoot: &externalapi.DomainHash{},
|
||||
AcceptedIDMerkleRoot: &externalapi.DomainHash{},
|
||||
UTXOCommitment: &externalapi.DomainHash{},
|
||||
Timestamp: mstime.Time{},
|
||||
Bits: 0,
|
||||
Nonce: 0,
|
||||
}))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "got unrequested block",
|
||||
},
|
||||
{
|
||||
name: "sending invalid block",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(invalidBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "got invalid block",
|
||||
},
|
||||
{
|
||||
name: "sending unexpected message instead of block locator",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestBlockLocator)
|
||||
|
||||
// Sending a block while expected a block locator
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s",
|
||||
appmessage.CmdBlockLocator),
|
||||
},
|
||||
{
|
||||
name: "sending unknown highest hash",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
triggerIBD(incomingRoute, outgoingRoute)
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgIBDBlockLocator)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(unknownBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "is not in the original blockLocator",
|
||||
},
|
||||
{
|
||||
name: "sending unexpected type instead of highest hash",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
triggerIBD(incomingRoute, outgoingRoute)
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgIBDBlockLocator)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s",
|
||||
appmessage.CmdIBDBlockLocatorHighestHash),
|
||||
},
|
||||
{
|
||||
name: "sending unexpected type instead of a header",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
triggerIBD(incomingRoute, outgoingRoute)
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
|
||||
highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
// Sending unrequested block locator
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s or %s",
|
||||
appmessage.CmdHeader, appmessage.CmdDoneHeaders),
|
||||
},
|
||||
{
|
||||
name: "sending unexpected type instead of pruning point hash",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
triggerIBD(incomingRoute, outgoingRoute)
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
|
||||
highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
// Sending unrequested block locator
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s",
|
||||
appmessage.CmdPruningPointHash),
|
||||
},
|
||||
{
|
||||
name: "sending unexpected type instead of pruning point block",
|
||||
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
|
||||
triggerIBD(incomingRoute, outgoingRoute)
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
|
||||
highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(validPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
// Sending unrequested block locator
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s",
|
||||
appmessage.CmdIBDBlock),
|
||||
},
|
||||
{
name: "sending unexpected type instead of UTXO chunk",
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
triggerIBD(incomingRoute, outgoingRoute)

msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}

highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestHeaders)

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)

err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(validPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

// Sending unrequested block locator
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
},
expectsProtocolError: true,
expectsBan: true,
expectsErrToContain: fmt.Sprintf("received unexpected message type. "+
"expected: %s or %s or %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint),
},
{
name: "sending invalid pruning point",
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
triggerIBD(incomingRoute, outgoingRoute)

msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}

highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestHeaders)

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)

err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

err = incomingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
},
expectsProtocolError: true,
expectsBan: true,
expectsErrToContain: "error with pruning point UTXO set",
},
{
name: "sending unexpected type instead of IBD block",
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
triggerIBD(incomingRoute, outgoingRoute)

msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}

highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestHeaders)

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)

err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(validPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

err = incomingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)

// Sending unrequested block locator
err = incomingRoute.Enqueue(appmessage.NewMsgBlockLocator(nil))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
},
expectsProtocolError: true,
expectsBan: true,
expectsErrToContain: fmt.Sprintf("received unexpected message type. expected: %s",
appmessage.CmdIBDBlock),
},
{
name: "sending unexpected IBD block",
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
triggerIBD(incomingRoute, outgoingRoute)

msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}

highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestHeaders)

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)

err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(validPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

err = incomingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(unexpectedIBDBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
},
expectsProtocolError: true,
expectsBan: true,
expectsErrToContain: "expected block",
},
{
name: "sending invalid IBD block",
funcToExecute: func(incomingRoute, outgoingRoute *router.Route) {
triggerIBD(incomingRoute, outgoingRoute)

msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}

highestHash := msg.(*appmessage.MsgIBDBlockLocator).BlockLocatorHashes[0]
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlockLocatorHighestHash(highestHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestHeaders)

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)

err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(validPruningPointHash))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

err = incomingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}

msg, err = outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)

err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
},
expectsProtocolError: true,
expectsBan: true,
expectsErrToContain: "invalid block",
},
}

testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
for _, test := range tests {
incomingRoute := router.NewRoute()
outgoingRoute := router.NewRoute()
peer := peerpkg.New(nil)
errChan := make(chan error)
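// Run the flow under test in its own goroutine; the test body below drives it
// through the two routes and then asserts on the error it returns (or fails on timeout).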
go func() {
errChan <- blockrelay.HandleRelayInvs(&fakeRelayInvsContext{
params: params,
}, incomingRoute, outgoingRoute, peer)
}()

test.funcToExecute(incomingRoute, outgoingRoute)

select {
case err := <-errChan:
checkFlowError(t, err, test.expectsProtocolError, test.expectsBan, test.expectsErrToContain)
case <-time.After(time.Second):
t.Fatalf("timed out after %s", time.Second)
}
}
})
}
@@ -35,6 +35,5 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}

context.AddressManager().AddAddresses(msgAddresses.AddressList...)
return nil
return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
}
app/protocol/flows/v4/blockrelay/block_locator.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}

func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}

switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
}
}
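In practice the flow requests a locator and then waits for the peer's MsgBlockLocator. A minimal sketch combining the two primitives above, inside the same blockrelay package; fetchBlockLocator is a hypothetical name and not part of this diff:

func (flow *handleRelayInvsFlow) fetchBlockLocator(highHash *externalapi.DomainHash, limit uint32) ([]*externalapi.DomainHash, error) {
	// Ask the peer for a block locator ending at highHash, then block until its MsgBlockLocator arrives.
	err := flow.sendGetBlockLocator(highHash, limit)
	if err != nil {
		return nil, err
	}
	return flow.receiveBlockLocator()
}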
@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

@@ -44,7 +45,9 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.Exists {

// The IBD block locator is checking only existing blocks with bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
continue
}

@@ -70,8 +73,14 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
}

if !foundHighestHashInTheSelectedParentChainOfTargetHash {
return protocolerrors.Errorf(true, "no hash was found in the blockLocator "+
log.Warnf("no hash was found in the blockLocator "+
"that was in the selected parent chain of targetHash %s", targetHash)

ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
if err != nil {
return err
}
}
}
}
@@ -48,7 +48,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i, len(msgRequestIBDBlocks.Hashes))
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
}
}
}
@@ -0,0 +1,145 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sync/atomic"
)

// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
Config() *config.Config
}

var isBusy uint32

// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {

for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}

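// Only one pruning point anticone request is served at a time: the compare-and-swap on
// isBusy acts as a non-blocking guard, and concurrent requests are rejected with a
// non-banning protocol error (shouldBan: false).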
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)

log.Debugf("Got request for pruning point and its anticone from %s", peer)

pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}

msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}

err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}

pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}

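// DAA window headers and GHOSTDAG data are typically shared by many blocks in the anticone,
// so they are collected once into daaWindowBlocks/ghostdagData and each block references
// them by index, avoiding sending the same entry over the wire more than once.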
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)

ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
for _, blockHash := range pointAndItsAnticone {
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
if err != nil {
return err
}

trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
for i, daaBlockHash := range blockDAAWindowHashes {
index, exists := daaWindowHashesToIndex[*daaBlockHash]
if !exists {
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
if err != nil {
return err
}
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
index = len(daaWindowBlocks) - 1
daaWindowHashesToIndex[*daaBlockHash] = index
}

trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
}

ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
if err != nil {
return err
}

trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
if !exists {
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
if err != nil {
return err
}
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
Hash: ghostdagDataBlockHash,
GHOSTDAGData: data,
})
index = len(ghostdagData) - 1
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
}

trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
}
}

err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
if err != nil {
return err
}

for _, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}

err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
}

err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}

log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
}
}
@@ -0,0 +1,40 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
Domain() domain.Domain
}

// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {

for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}

log.Debugf("Got request for pruning point proof from %s", peer)

pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
if err != nil {
return err
}
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
err = outgoingRoute.Enqueue(pruningPointProofMessage)
if err != nil {
return err
}

log.Debugf("Sent pruning point proof to %s", peer)
}
}
@@ -3,6 +3,7 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
@@ -23,15 +24,16 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
SharedRequestedBlocks() *SharedRequestedBlocks
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}

type handleRelayInvsFlow struct {
@@ -53,7 +55,10 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
return flow.start()
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
close(peer.IBDRequestChannel())
return err
}

func (flow *handleRelayInvsFlow) start() error {
@@ -79,7 +84,18 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}

isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}

if flow.IsOrphan(inv.Hash) {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", inv.Hash)
continue
}

log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil {
@@ -104,9 +120,24 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}

log.Debugf("Processing block %s", inv.Hash)
missingParents, blockInsertionResult, err := flow.processBlock(block)
err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}

if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
continue
}

log.Debugf("Processing block %s", inv.Hash)
missingParents, virtualChangeSet, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
continue
}

if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Infof("Ignoring duplicate block %s", inv.Hash)
continue
@@ -115,7 +146,7 @@ func (flow *handleRelayInvsFlow) start() error {
}
if len(missingParents) > 0 {
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
err := flow.processOrphan(block, missingParents)
err := flow.processOrphan(block)
if err != nil {
return err
}
@@ -128,13 +159,22 @@ func (flow *handleRelayInvsFlow) start() error {
return err
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, blockInsertionResult)
err = flow.OnNewBlock(block, virtualChangeSet)
if err != nil {
return err
}
}
}

func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}

return nil
}

func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
@@ -156,14 +196,14 @@ func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error)
}

func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
if exists {
return nil, true, nil
}

// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().remove(requestHash)
defer flow.SharedRequestedBlocks().Remove(requestHash)

getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
@@ -206,9 +246,9 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
}
}

func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
blockHash := consensushashing.BlockHash(block)
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
@@ -221,7 +261,7 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, blockInsertionResult, nil
return nil, virtualChangeSet, nil
}

func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@@ -229,7 +269,7 @@ func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) erro
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}

func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) error {
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)

// Return if the block has been orphaned from elsewhere already
@@ -244,6 +284,19 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
return err
}
if isBlockInOrphanResolutionRange {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}

if isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", blockHash)
return nil
}
}

log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set", blockHash)
flow.AddOrphan(block)
@@ -254,7 +307,28 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
// Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash)
return flow.runIBDIfNotRunning(blockHash)

// Send the block to IBD flow via the IBDRequestChannel.
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
select {
case flow.peer.IBDRequestChannel() <- block:
default:
}
return nil
}

func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}

return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
parents := block.Header.DirectParents()
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
}

// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
@@ -263,8 +337,7 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
lowHash := flow.Config().ActiveNetParams.GenesisHash
err := flow.sendGetBlockLocator(lowHash, blockHash, orphanResolutionRange)
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
if err != nil {
return false, err
}
@@ -32,20 +32,19 @@ func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute

func (flow *handleRequestBlockLocatorFlow) start() error {
for {
lowHash, highHash, limit, err := flow.receiveGetBlockLocator()
highHash, limit, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with lowHash: %s, highHash: %s, limit: %d",
lowHash, highHash, limit)
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)

locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
if err != nil || len(locator) == 0 {
if err != nil {
log.Debugf("Received error from CreateBlockLocator: %s", err)
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
}
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between blocks %s and %s", lowHash, highHash)
"locator between the pruning point and %s", highHash)
}

err = flow.sendBlockLocator(locator)
@@ -55,16 +54,15 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
}
}

func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32, err error) {
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {

message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, 0, err
return nil, 0, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)

return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}

func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
@@ -12,31 +12,31 @@ import (

const ibdBatchSize = router.DefaultMaxMessages

// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestIBDBlocksContext interface {
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
Domain() domain.Domain
}

type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
type handleRequestHeadersFlow struct {
RequestHeadersContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}

// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {

flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
flow := &handleRequestHeadersFlow{
RequestHeadersContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}

func (flow *handleRequestBlocksFlow) start() error {
func (flow *handleRequestHeadersFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {
@@ -49,8 +49,9 @@ func (flow *handleRequestBlocksFlow) start() error {

// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
const maxBlueScoreDifference = 1 << 10
blockHashes, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}
@@ -0,0 +1,140 @@
package blockrelay

import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
Domain() domain.Domain
}

type handleRequestPruningPointUTXOSetFlow struct {
HandleRequestPruningPointUTXOSetContext
incomingRoute, outgoingRoute *router.Route
}

// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
outgoingRoute *router.Route) error {

flow := &handleRequestPruningPointUTXOSetFlow{
HandleRequestPruningPointUTXOSetContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}

return flow.start()
}

func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
for {
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
if err != nil {
return err
}

err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
if err != nil {
return err
}
}
}

func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
defer onEnd()

log.Debugf("Got request for pruning point UTXO set")

return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}

func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
*appmessage.MsgRequestPruningPointUTXOSet, error) {

message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
}

func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
}

log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)

outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}

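// A short read (fewer than `step` UTXOs) means this was the last chunk. When we are not at
// an ibdBatchSize boundary the peer will not ask for another batch, so completion is signalled
// immediately; otherwise the done message is sent after the peer's next-chunk request below.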
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)

return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}

if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++

// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}

if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)

return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}
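For reference, the consuming side of this chunking protocol is the mirror image of the loop above: read chunks, acknowledge every ibdBatchSize of them, and stop on the done message. A rough sketch within the blockrelay package, under the assumption that appmessage exposes NewMsgRequestNextPruningPointUTXOSetChunk (the real logic lives in receiveAndInsertPruningPointUTXOSet in ibd.go below):

func receivePruningPointUTXOSetChunks(incomingRoute, outgoingRoute *router.Route) error {
	receivedChunkCount := 0
	for {
		message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return err
		}
		switch message.(type) {
		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			// The sender has no more UTXOs to stream.
			return nil
		case *appmessage.MsgPruningPointUTXOSetChunk:
			// A real implementation would stage the chunk's outpoint/UTXO-entry pairs here.
			receivedChunkCount++
			if receivedChunkCount%ibdBatchSize == 0 {
				// Ask the sender for the next batch of chunks.
				err := outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointUTXOSetChunk())
				if err != nil {
					return err
				}
			}
		default:
			return protocolerrors.Errorf(false, "received unexpected message type: %s", message.Command())
		}
	}
}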
app/protocol/flows/v4/blockrelay/ibd.go (new file, 636 lines)
@@ -0,0 +1,636 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
"time"
)

// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}

type handleIBDFlow struct {
IBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}

// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {

flow := &handleIBDFlow{
IBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}

func (flow *handleIBDFlow) start() error {
for {
// Wait for IBD requests triggered by other flows
block, ok := <-flow.peer.IBDRequestChannel()
if !ok {
return nil
}
err := flow.runIBDIfNotRunning(block)
if err != nil {
return err
}
}
}

func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}

isFinishedSuccessfully := false
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
}()

highHash := consensushashing.BlockHash(block)
log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Criticalf("Syncing blocks up to %s", highHash)
log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
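// Checkpoint gate: if the hard-coded checkpoint block is already known locally, IBD is
// aborted unless the checkpoint lies on the selected parent chain of the highest shared block.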
checkpoint, err := externalapi.NewDomainHashFromString("05ff0f2e1d201dcaee7c5e567cc2c1d42ca3cce9fefbd3b519dc68b5bb89d0b9")
if err != nil {
return err
}

info, err := flow.Domain().Consensus().GetBlockInfo(checkpoint)
if err != nil {
return err
}

if info.Exists {
isInSelectedParentChainOf, err := flow.Domain().Consensus().IsInSelectedParentChainOf(checkpoint, highestSharedBlockHash)
if err != nil {
return err
}

if !isInSelectedParentChainOf {
log.Criticalf("Stopped IBD because the checkpoint %s is not in the selected chain of %s", checkpoint, highestSharedBlockHash)
return nil
}
}

shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {
return err
}

if !shouldSync {
return nil
}

if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
if err != nil {
return err
}
} else {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}

if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", highHash)
return nil
}
}

err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
if err != nil {
return err
}
}

err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}

log.Debugf("Finished syncing blocks up to %s", highHash)
isFinishedSuccessfully = true
return nil
}

func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}

return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = "(interrupted)"
}
log.Infof("IBD finished %s", successString)
}

// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {

log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, false, err
}

for {
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, false, err
}
if !highestHashFound {
return nil, false, nil
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, false, err
}

if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {

return highestHash, true, nil
}

locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}

blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, false, err
}
}
}

func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}

return blockLocator, nil
}

func (flow *handleIBDFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {

highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)

return highestHashIndex, nil
}

// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {

ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, false, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlockLocatorHighestHash:
highestHash := message.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

return highestHash, true, nil
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
log.Debugf("Peer %s does not know any block within our blockLocator. "+
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
return nil, false, nil
default:
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
}

func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
|
||||
log.Infof("Downloading headers from %s", flow.peer)
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
|
||||
|
||||
// Keep a short queue of BlockHeadersMessages so that there's
|
||||
// never a moment when the node is not validating and inserting
|
||||
// headers
|
||||
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
|
||||
errChan := make(chan error)
|
||||
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
|
||||
for {
|
||||
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if doneIBD {
|
||||
close(blockHeadersMessageChan)
|
||||
return
|
||||
}
|
||||
|
||||
blockHeadersMessageChan <- blockHeadersMessage
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
for {
|
||||
select {
|
||||
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
// If the highHash has not been received, the peer is misbehaving
|
||||
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"highHash block %s from peer %s during block download", highHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, header := range ibdBlocksMessage.BlockHeaders {
|
||||
_, err := flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
|
||||
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
|
||||
case err := <-errChan:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}
	switch message := message.(type) {
	case *appmessage.BlockHeadersMessage:
		return message, false, nil
	case *appmessage.MsgDoneHeaders:
		return nil, true, nil
	default:
		return nil, false,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s, got: %s",
				appmessage.CmdBlockHeaders,
				appmessage.CmdDoneHeaders,
				message.Command())
	}
}

func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	block := &externalapi.DomainBlock{
		Header:       header,
		Transactions: nil,
	}

	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
		return false, err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
		return false, nil
	}
	_, err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
			return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}
	return true, nil
}

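// Illustrative sketch (not part of this file): the error-classification pattern used
// in processHeader above. errors.As detects whether the failure is a consensus rule
// violation at all; errors.Is then singles out the benign duplicate-block case. Only
// genuine rule violations are turned into banning protocol errors. The helper name
// and signature are hypothetical.
func classifyInsertError(err error, blockHash *externalapi.DomainHash) (skip bool, outErr error) {
	if err == nil {
		return false, nil
	}
	if !errors.As(err, &ruleerrors.RuleError{}) {
		// Not a rule error: an internal failure, so just wrap and propagate.
		return false, errors.Wrapf(err, "failed to process block %s", blockHash)
	}
	if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
		// Benign: the block is already known, so it can simply be skipped.
		return true, nil
	}
	// A real rule violation: the peer sent invalid data.
	return false, protocolerrors.Wrapf(true, err, "got invalid block %s", blockHash)
}
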
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
	headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
	if err != nil {
		return err
	}
	headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

	currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
	if err != nil {
		return err
	}
	currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

	if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
		return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
			"tip is smaller than that of the current selected tip")
	}

	minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
	if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
		return protocolerrors.Errorf(false, "the difference between the timestamps of "+
			"the current selected tip and the candidate selected tip is too small. Aborting IBD...")
	}
	return nil
}

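// Illustrative sketch (not part of this file): the staging-vs-current timestamp check
// above, in isolation. The candidate (staging) header tip must not be older than the
// current tip and must be at least ten minutes ahead of it, with both timestamps in
// milliseconds. The helper name is hypothetical.
func isCandidateFarEnoughAhead(candidateTimestampMillis, currentTimestampMillis int64) bool {
	const minDifferenceMillis = int64(10 * 60 * 1000) // same value as (10 * time.Minute).Milliseconds()
	return candidateTimestampMillis >= currentTimestampMillis &&
		candidateTimestampMillis-currentTimestampMillis >= minDifferenceMillis
}
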
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Debugf("Received %d UTXO set chunks so far, totaling %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}

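// Illustrative sketch (not part of this file): the chunked flow control used above.
// The sender streams a bounded window of chunks; after every windowSize chunks the
// receiver explicitly asks for the next window, so neither side has to buffer the
// whole UTXO set at once. receiveChunk and requestNextWindow are hypothetical
// stand-ins for the appmessage round trips.
func receiveChunked(receiveChunk func() (utxoCount int, done bool, err error),
	requestNextWindow func() error, windowSize int) (totalUTXOs int, err error) {

	chunkCount := 0
	for {
		count, done, err := receiveChunk()
		if err != nil {
			return 0, err
		}
		if done {
			return totalUTXOs, nil
		}
		totalUTXOs += count
		chunkCount++
		if chunkCount%windowSize == 0 {
			// Acknowledge the window so the sender keeps streaming.
			if err := requestNextWindow(); err != nil {
				return 0, err
			}
		}
	}
}
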
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted into the DAG during IBD if they were requested before IBD started.
		// In rare cases, all the IBD blocks might already be inserted by the time we reach this point.
		// In that case, GetMissingBlockBodyHashes returns an empty slice.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
	if err != nil {
		return err
	}
	highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
	if err != nil {
		return err
	}
	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
	highestProcessedDAAScore := lowBlockHeader.DAAScore()

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block, virtualChangeSet)
			if err != nil {
				return err
			}

			highestProcessedDAAScore = block.Header.DAAScore()
		}

		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
	}

	return flow.resolveVirtual(highestProcessedDAAScore)
}

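// Illustrative sketch (not part of this file): the batching arithmetic used above.
// Block hashes are requested in windows of ibdBatchSize, and the final window is
// simply whatever remains. forEachBatch is a hypothetical helper, shown only to make
// the slicing explicit.
func forEachBatch(hashes []*externalapi.DomainHash, batchSize int,
	handleBatch func([]*externalapi.DomainHash) error) error {

	for offset := 0; offset < len(hashes); offset += batchSize {
		end := offset + batchSize
		if end > len(hashes) {
			// Clamp the last window to the end of the slice.
			end = len(hashes)
		}
		if err := handleBatch(hashes[offset:end]); err != nil {
			return err
		}
	}
	return nil
}
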
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(true, "sent header-only block %s where a block with a body was expected",
			consensushashing.BlockHash(block))
	}

	return nil
}

func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
	virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
	if err != nil {
		return err
	}

	for i := 0; ; i++ {
		if i%10 == 0 {
			virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
			if err != nil {
				return err
			}
			var percents int
			// With uint64 scores, compare directly rather than subtracting, so a
			// target at or below the starting score cannot underflow.
			if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart {
				percents = 100
			} else {
				percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
			}
			log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
		}
		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
		if err != nil {
			return err
		}

		err = flow.OnVirtualChange(virtualChangeSet)
		if err != nil {
			return err
		}

		if isCompletelyResolved {
			log.Infof("Resolved virtual")
			return nil
		}
	}
}
32 app/protocol/flows/v4/blockrelay/ibd_progress_reporter.go Normal file
@@ -0,0 +1,32 @@
package blockrelay

type ibdProgressReporter struct {
	lowDAAScore                 uint64
	highDAAScore                uint64
	objectName                  string
	totalDAAScoreDifference     uint64
	lastReportedProgressPercent int
	processed                   int
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
	return &ibdProgressReporter{
		lowDAAScore:                 lowDAAScore,
		highDAAScore:                highDAAScore,
		objectName:                  objectName,
		totalDAAScoreDifference:     highDAAScore - lowDAAScore,
		lastReportedProgressPercent: 0,
		processed:                   0,
	}
}

func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
	ipr.processed += processedDelta

	relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
	progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
	if progressPercent > ipr.lastReportedProgressPercent {
		log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
		ipr.lastReportedProgressPercent = progressPercent
	}
}
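// Illustrative sketch (not part of this file): reportProgress above divides by
// totalDAAScoreDifference, which is zero whenever highDAAScore equals lowDAAScore.
// A guarded variant of the percentage computation could look like this; the helper
// name is hypothetical.
func progressPercent(relativeDAAScore, totalDAAScoreDifference uint64) int {
	if totalDAAScoreDifference == 0 {
		// Nothing to sync between low and high scores; report completion.
		return 100
	}
	percent := int(float64(relativeDAAScore) / float64(totalDAAScoreDifference) * 100)
	if percent > 100 {
		percent = 100
	}
	return percent
}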
Some files were not shown because too many files have changed in this diff.