Compare commits


19 Commits

Author SHA1 Message Date
cbitensky
a5df278855 Updating changelog.txt 2021-07-05 11:50:43 +03:00
cbitensky
de0bdfcd33 Make use of maxBlocks instead of maxBlueScoreDifference in antiPastHashesBetween (#1774) 2021-07-01 11:33:22 +03:00
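A minimal Go sketch of the idea behind this change, with made-up names rather than kaspad's actual antiPastHashesBetween: capping the traversal by an explicit block count (maxBlocks) guarantees a bounded response, whereas a blue-score-difference bound only approximates how many hashes will actually be collected.

package main

import "fmt"

// Hypothetical DAG for illustration: each hash maps to its children.
var children = map[string][]string{
	"a": {"b", "c"}, "b": {"d"}, "c": {"d"}, "d": {},
}

// collectUpTo walks from start and stops once maxBlocks hashes have been
// collected, so the response size is bounded regardless of DAG shape.
func collectUpTo(start string, maxBlocks int) []string {
	var result []string
	visited := map[string]bool{}
	queue := []string{start}
	for len(queue) > 0 && len(result) < maxBlocks {
		h := queue[0]
		queue = queue[1:]
		if visited[h] {
			continue
		}
		visited[h] = true
		result = append(result, h)
		queue = append(queue, children[h]...)
	}
	return result
}

func main() {
	fmt.Println(collectUpTo("a", 3)) // at most 3 hashes, e.g. [a b c]
}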
tal
eaf4180082 Update to version 0.10.4 2021-06-06 14:54:41 +03:00
Ori Newman
5ef24ae37e Merge branch 'v0.10.3-dev' of github.com:kaspanet/kaspad into v0.10.3-dev 2021-05-31 18:24:19 +03:00
stasatdaglabs
83e631548f Implement NotifyVirtualDaaScoreChanged (#1737)
* Add notifyVirtualDaaScoreChanged to protowire.

* Add notifyVirtualDaaScoreChanged to the rest of kaspad.

* Add notifyVirtualDaaScoreChanged to the rest of kaspad.

* Test the DAA score notification in TestVirtualSelectedParentBlueScore.

* Rename TestVirtualSelectedParentBlueScore to TestVirtualSelectedParentBlueScoreAndVirtualDAAScore.
2021-05-31 18:13:44 +03:00
stasatdaglabs
0adfb2dfbb Implement NotifyVirtualDaaScoreChanged (#1737)
* Add notifyVirtualDaaScoreChanged to protowire.

* Add notifyVirtualDaaScoreChanged to the rest of kaspad.

* Add notifyVirtualDaaScoreChanged to the rest of kaspad.

* Test the DAA score notification in TestVirtualSelectedParentBlueScore.

* Rename TestVirtualSelectedParentBlueScore to TestVirtualSelectedParentBlueScoreAndVirtualDAAScore.
2021-05-31 18:11:08 +03:00
stasatdaglabs
4461f56ca3 Update to version 0.10.3 2021-05-18 13:54:33 +03:00
Ori Newman
a18f2f8802 Fix overflow when checking coinbase maturity and don't ban peers that send transactions with immature spend (#1722)
* Fix overflow when checking coinbase maturity and don't ban peers that send transactions with immature spend

* Fix tests

Co-authored-by: Svarog <feanorr@gmail.com>
2021-05-14 13:02:50 +03:00
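A hedged illustration of the overflow class this commit addresses, using made-up names rather than kaspad's consensus code: adding a maturity constant to a uint64 score can wrap around near MaxUint64, so the safe form compares via subtraction after an ordering guard.

package main

import "fmt"

const coinbaseMaturity = uint64(100)

// Overflowing form: utxoScore+coinbaseMaturity can wrap near MaxUint64,
// making an immature coinbase output look spendable.
func isMatureNaive(utxoScore, currentScore uint64) bool {
	return utxoScore+coinbaseMaturity <= currentScore
}

// Overflow-safe form: guard the ordering, then subtract; uint64
// subtraction after the guard cannot wrap.
func isMatureSafe(utxoScore, currentScore uint64) bool {
	if currentScore < utxoScore {
		return false
	}
	return currentScore-utxoScore >= coinbaseMaturity
}

func main() {
	utxoScore := ^uint64(0) - 10 // near MaxUint64
	currentScore := utxoScore + 5
	fmt.Println(isMatureNaive(utxoScore, currentScore)) // true: wrapped sum, wrong
	fmt.Println(isMatureSafe(utxoScore, currentScore))  // false: correctly immature
}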
Ori Newman
04dc1947ff Fix calcTxSequenceLockFromReferencedUTXOEntries for loop break condition (#1723)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-05-14 12:49:53 +03:00
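The kaspad loop itself is not reproduced here; this sketch only illustrates the general bug class behind a wrong break condition: a sequence lock must aggregate over all inputs, so a break that fires on the first non-locking input silently ignores the rest.

package main

import "fmt"

// maxLock picks the most restrictive (largest) lock across all inputs.
// A value below zero means "this input imposes no lock".
func maxLock(inputLocks []int64) int64 {
	result := int64(-1)
	for _, lock := range inputLocks {
		if lock < 0 {
			continue // a buggy `break` here would skip the remaining inputs
		}
		if lock > result {
			result = lock
		}
	}
	return result
}

func main() {
	fmt.Println(maxLock([]int64{-1, 500, 200})) // 500; with `break` it would be -1
}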
Ori Newman
36c56f73bf Add VirtualDaaScore to GetBlockDagInfo (#1719)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-05-14 12:36:20 +03:00
Ori Newman
b76ca41109 serializeAddress should always serialize as IPv6, since it assumes the IP size is 16 bytes (#1720) 2021-05-13 17:42:59 +03:00
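A small sketch of the invariant this commit message describes, using an illustrative function name rather than kaspad's serializeAddress: net.IP.To16 normalizes IPv4 addresses to their 16-byte IPv4-mapped IPv6 form, so the serialized address is always exactly 16 bytes.

package main

import (
	"fmt"
	"net"
)

// serializeIP always emits the 16-byte IPv6 form, even for IPv4 input,
// matching a wire format that assumes a fixed 16-byte IP field.
func serializeIP(ip net.IP) ([]byte, error) {
	ip16 := ip.To16() // IPv4 becomes IPv4-mapped IPv6
	if ip16 == nil {
		return nil, fmt.Errorf("invalid IP: %v", ip)
	}
	return ip16, nil
}

func main() {
	serialized, _ := serializeIP(net.ParseIP("192.0.2.1"))
	fmt.Println(len(serialized)) // 16, never 4
}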
Ori Newman
50fd86e287 Fix getBlock and getBlocks RPC commands to return blocks and transactions properly (#1716)
* Fix getBlock RPC command to return transactions

* Fix getBlocks RPC command to return transactions and blocks

* Add GetBlockEvenIfHeaderOnly and use it for getBlock and getBlocks

* Implement GetBlockEvenIfHeaderOnly for fakeRelayInvsContext

* Use less nested code
2021-05-13 15:45:03 +03:00
stasatdaglabs
79cf0d64d0 Update to version 0.10.2 2021-05-09 14:33:47 +03:00
Ori Newman
b405ea50e5 Calculate virtual's acceptance data and multiset after importing a new pruning point (#1700) 2021-05-05 18:13:00 +03:00
Ori Newman
ccfe8a45dd Create v0.10.1 2021-05-05 17:09:30 +03:00
stasatdaglabs
9dd8136e4b Add v0.10.0 to the changelog. (#1692) 2021-04-26 15:01:02 +03:00
stasatdaglabs
eb1703b948 Fix the mempool-limits stability test (#1690)
* Add -v to the `go test` command.

* Generate a new keypair for mempool-limits.

* Set mempool-limits to time out only after 24 hours.
2021-04-25 16:27:47 +03:00
stasatdaglabs
a6da3251d0 Generalize stability-tests/docker/Dockerfile. (#1685) 2021-04-21 12:53:37 +03:00
stasatdaglabs
bf198948c4 Update the testnet version to testnet-5. (#1683) 2021-04-20 14:35:41 +03:00
550 changed files with 13150 additions and 28586 deletions

View File

@@ -11,8 +11,8 @@
#>
param(
[System.UInt64] $MinimumSize = 16gb ,
[System.UInt64] $MaximumSize = 16gb ,
[System.UInt64] $MinimumSize = 8gb ,
[System.UInt64] $MaximumSize = 8gb ,
[System.String] $DiskRoot = "D:"
)

View File

@@ -1,7 +1,7 @@
name: Build and upload assets
name: Build and Upload assets
on:
release:
types: [ published ]
types: [published]
jobs:
build:
@@ -10,33 +10,34 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-latest, windows-latest, macos-latest ]
name: Building, ${{ matrix.os }}
name: Building For ${{ matrix.os }}
steps:
- name: Fix CRLF on Windows
if: runner.os == 'Windows'
- name: Fix windows CRLF
run: git config --global core.autocrlf false
- name: Check out code into the Go module directory
uses: actions/checkout@v2
# Increase the pagefile size on Windows to avoid running out of memory
- name: Increase pagefile size on Windows
# We need to increase the page size because the tests run out of memory on github CI windows.
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
- name: Increase page size on windows
if: runner.os == 'Windows'
run: powershell -command .github\workflows\SetPageFileSize.ps1
shell: powershell
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
- name: Setup Go
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Build on Linux
- name: Build on linux
if: runner.os == 'Linux'
# `-extldflags=-static` - means static link everything,
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-s -w` strips the binary to produce smaller size binaries
run: |
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
zip -r "${archive}" ./bin/*
@@ -47,7 +48,7 @@ jobs:
if: runner.os == 'Windows'
shell: bash
run: |
go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
go build -v -ldflags="-s -w" -o bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
powershell "Compress-Archive bin/* \"${archive}\""
@@ -57,7 +58,7 @@ jobs:
- name: Build on MacOS
if: runner.os == 'macOS'
run: |
go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
go build -v -ldflags="-s -w" -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
zip -r "${archive}" ./bin/*
@@ -65,7 +66,7 @@ jobs:
echo "asset_name=${asset_name}" >> $GITHUB_ENV
- name: Upload release asset
- name: Upload Release Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,4 +1,4 @@
name: Race
name: Go-Race
on:
schedule:
@@ -7,7 +7,7 @@ on:
jobs:
race_test:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
@@ -19,10 +19,10 @@ jobs:
with:
fetch-depth: 0
- name: Setup Go
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
go-version: 1.15
- name: Set scheduled branch name
shell: bash
@@ -46,4 +46,4 @@ jobs:
run: |
git checkout "${{ env.run_on }}"
git status
go test -timeout 20m -race ./...
go test -race ./...

View File

@@ -1,36 +1,38 @@
name: Tests
name: Go
on:
push:
pull_request:
# edited - because base branch can be modified
# synchronize - update commits on PR
types: [opened, synchronize, edited]
# edtited - "title, body, or the base branch of the PR is modified"
# synchronize - "commit(s) pushed to the pull request"
types: [opened, synchronize, edited, reopened]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest, macos-latest ]
name: Tests, ${{ matrix.os }}
os: [ ubuntu-16.04, macos-10.15, windows-2019 ]
name: Testing on ${{ matrix.os }}
steps:
- name: Fix CRLF on Windows
if: runner.os == 'Windows'
- name: Fix windows CRLF
run: git config --global core.autocrlf false
- name: Check out code into the Go module directory
uses: actions/checkout@v2
# Increase the pagefile size on Windows to avoid running out of memory
- name: Increase pagefile size on Windows
# We need to increase the page size because the tests run out of memory on github CI windows.
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
- name: Increase page size on windows
if: runner.os == 'Windows'
run: powershell -command .github\workflows\SetPageFileSize.ps1
shell: powershell
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
- name: Setup Go
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
@@ -49,41 +51,14 @@ jobs:
shell: bash
run: ./build_and_test.sh -v
stability-test-fast:
runs-on: ubuntu-latest
name: Fast stability tests, ${{ github.head_ref }}
steps:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install kaspad
run: go install ./...
- name: Install golint
run: go get -u golang.org/x/lint/golint
- name: Run fast stability tests
working-directory: stability-tests
run: ./install_and_test.sh
coverage:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
name: Produce code coverage
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Setup Go
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
@@ -95,4 +70,4 @@ jobs:
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
- name: Upload coverage file
run: bash <(curl -s https://codecov.io/bash)
run: bash <(curl -s https://codecov.io/bash)

View File

@@ -12,7 +12,8 @@ If you want to make a big change it's better to discuss it first by opening an i
## Pull Request process
Any pull request should be opened against the development branch `dev`.
Any pull request should be opened against the development branch of the target version. The development branch format is
as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
submitting your PR.

View File

@@ -1,13 +1,16 @@
Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in Beta state.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## What is kaspa
@@ -60,8 +63,6 @@ Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.
Issue priorities may be seen at https://github.com/orgs/kaspanet/projects/4
## Documentation
The [documentation](https://github.com/kaspanet/docs) is a work-in-progress

View File

@@ -20,10 +20,7 @@ import (
"github.com/kaspanet/kaspad/version"
)
const (
leveldbCacheSizeMiB = 256
defaultDataDirname = "datadir2"
)
const leveldbCacheSizeMiB = 256
var desiredLimits = &limits.DesiredLimits{
FileLimitWant: 2048,
@@ -87,7 +84,6 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
if app.cfg.Profile != "" {
profiling.Start(app.cfg.Profile, log)
}
profiling.TrackHeap(app.cfg.AppDir, log)
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
@@ -105,7 +101,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
// Open the database
databaseContext, err := openDB(app.cfg)
if err != nil {
log.Errorf("Loading database failed: %+v", err)
log.Error(err)
return err
}
@@ -163,7 +159,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
return filepath.Join(cfg.AppDir, defaultDataDirname)
return filepath.Join(cfg.AppDir, "data")
}
func removeDatabase(cfg *config.Config) error {

View File

@@ -2,9 +2,6 @@ package appmessage
import (
"encoding/hex"
"github.com/pkg/errors"
"math/big"
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
@@ -31,17 +28,13 @@ func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
return &MsgBlockHeader{
Version: domainBlockHeader.Version(),
Parents: domainBlockHeader.Parents(),
ParentHashes: domainBlockHeader.ParentHashes(),
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
Bits: domainBlockHeader.Bits(),
Nonce: domainBlockHeader.Nonce(),
BlueScore: domainBlockHeader.BlueScore(),
DAAScore: domainBlockHeader.DAAScore(),
BlueWork: domainBlockHeader.BlueWork(),
PruningPoint: domainBlockHeader.PruningPoint(),
}
}
@@ -62,17 +55,13 @@ func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
return blockheader.NewImmutableBlockHeader(
blockHeader.Version,
blockHeader.Parents,
blockHeader.ParentHashes,
blockHeader.HashMerkleRoot,
blockHeader.AcceptedIDMerkleRoot,
blockHeader.UTXOCommitment,
blockHeader.Timestamp.UnixMilliseconds(),
blockHeader.Bits,
blockHeader.Nonce,
blockHeader.DAAScore,
blockHeader.BlueScore,
blockHeader.BlueWork,
blockHeader.PruningPoint,
)
}
@@ -111,7 +100,6 @@ func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTran
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
SignatureScript: domainTransactionInput.SignatureScript,
Sequence: domainTransactionInput.Sequence,
SigOpCount: domainTransactionInput.SigOpCount,
}
}
@@ -160,7 +148,6 @@ func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInpu
return &externalapi.DomainTransactionInput{
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
SignatureScript: txIn.SignatureScript,
SigOpCount: txIn.SigOpCount,
Sequence: txIn.Sequence,
}
}
@@ -176,10 +163,14 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
for i, input := range rpcTransaction.Inputs {
previousOutpoint, err := RPCOutpointToDomainOutpoint(input.PreviousOutpoint)
transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
if err != nil {
return nil, err
}
previousOutpoint := &externalapi.DomainOutpoint{
TransactionID: *transactionID,
Index: input.PreviousOutpoint.Index,
}
signatureScript, err := hex.DecodeString(input.SignatureScript)
if err != nil {
return nil, err
@@ -188,7 +179,6 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
PreviousOutpoint: *previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
SigOpCount: input.SigOpCount,
}
}
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
@@ -223,36 +213,6 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
}, nil
}
// RPCOutpointToDomainOutpoint converts RPCOutpoint to DomainOutpoint
func RPCOutpointToDomainOutpoint(outpoint *RPCOutpoint) (*externalapi.DomainOutpoint, error) {
transactionID, err := transactionid.FromString(outpoint.TransactionID)
if err != nil {
return nil, err
}
return &externalapi.DomainOutpoint{
TransactionID: *transactionID,
Index: outpoint.Index,
}, nil
}
// RPCUTXOEntryToUTXOEntry converts RPCUTXOEntry to UTXOEntry
func RPCUTXOEntryToUTXOEntry(entry *RPCUTXOEntry) (externalapi.UTXOEntry, error) {
script, err := hex.DecodeString(entry.ScriptPublicKey.Script)
if err != nil {
return nil, err
}
return utxo.NewUTXOEntry(
entry.Amount,
&externalapi.ScriptPublicKey{
Script: script,
Version: entry.ScriptPublicKey.Version,
},
entry.IsCoinbase,
entry.BlockDAAScore,
), nil
}
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
@@ -267,7 +227,6 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
PreviousOutpoint: previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
SigOpCount: input.SigOpCount,
}
}
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
@@ -298,27 +257,22 @@ func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
domainOutpointAndUTXOEntryPairs[i] = outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(outpointAndUTXOEntryPair)
domainOutpointAndUTXOEntryPairs[i] = &externalapi.OutpointAndUTXOEntryPair{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: utxo.NewUTXOEntry(
outpointAndUTXOEntryPair.UTXOEntry.Amount,
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
),
}
}
return domainOutpointAndUTXOEntryPairs
}
func outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(
outpointAndUTXOEntryPair *OutpointAndUTXOEntryPair) *externalapi.OutpointAndUTXOEntryPair {
return &externalapi.OutpointAndUTXOEntryPair{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: utxo.NewUTXOEntry(
outpointAndUTXOEntryPair.UTXOEntry.Amount,
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
),
}
}
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
@@ -344,25 +298,15 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
parents := make([]*RPCBlockLevelParents, len(block.Header.Parents()))
for i, blockLevelParents := range block.Header.Parents() {
parents[i] = &RPCBlockLevelParents{
ParentHashes: hashes.ToStrings(blockLevelParents),
}
}
header := &RPCBlockHeader{
Version: uint32(block.Header.Version()),
Parents: parents,
ParentHashes: hashes.ToStrings(block.Header.ParentHashes()),
HashMerkleRoot: block.Header.HashMerkleRoot().String(),
AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
UTXOCommitment: block.Header.UTXOCommitment().String(),
Timestamp: block.Header.TimeInMilliseconds(),
Bits: block.Header.Bits(),
Nonce: block.Header.Nonce(),
DAAScore: block.Header.DAAScore(),
BlueScore: block.Header.BlueScore(),
BlueWork: block.Header.BlueWork().Text(16),
PruningPoint: block.Header.PruningPoint().String(),
}
transactions := make([]*RPCTransaction, len(block.Transactions))
for i, transaction := range block.Transactions {
@@ -376,16 +320,13 @@ func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
// RPCBlockToDomainBlock converts `block` into a DomainBlock
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
parents := make([]externalapi.BlockLevelParents, len(block.Header.Parents))
for i, blockLevelParents := range block.Header.Parents {
parents[i] = make(externalapi.BlockLevelParents, len(blockLevelParents.ParentHashes))
for j, parentHash := range blockLevelParents.ParentHashes {
var err error
parents[i][j], err = externalapi.NewDomainHashFromString(parentHash)
if err != nil {
return nil, err
}
parentHashes := make([]*externalapi.DomainHash, len(block.Header.ParentHashes))
for i, parentHash := range block.Header.ParentHashes {
domainParentHashes, err := externalapi.NewDomainHashFromString(parentHash)
if err != nil {
return nil, err
}
parentHashes[i] = domainParentHashes
}
hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
if err != nil {
@@ -399,27 +340,15 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
if err != nil {
return nil, err
}
blueWork, success := new(big.Int).SetString(block.Header.BlueWork, 16)
if !success {
return nil, errors.Errorf("failed to parse blue work: %s", block.Header.BlueWork)
}
pruningPoint, err := externalapi.NewDomainHashFromString(block.Header.PruningPoint)
if err != nil {
return nil, err
}
header := blockheader.NewImmutableBlockHeader(
uint16(block.Header.Version),
parents,
parentHashes,
hashMerkleRoot,
acceptedIDMerkleRoot,
utxoCommitment,
block.Header.Timestamp,
block.Header.Bits,
block.Header.Nonce,
block.Header.DAAScore,
block.Header.BlueScore,
blueWork,
pruningPoint)
block.Header.Nonce)
transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
for i, transaction := range block.Transactions {
domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
@@ -433,169 +362,3 @@ func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
Transactions: transactions,
}, nil
}
// BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData
func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData {
daaWindow := make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow))
for i, daaBlock := range block.DAAWindow {
daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{
Header: BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header),
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
}
}
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
for i, datum := range block.GHOSTDAGData {
ghostdagData[i] = &externalapi.BlockGHOSTDAGDataHashPair{
Hash: datum.Hash,
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
}
}
return &externalapi.BlockWithTrustedData{
Block: MsgBlockToDomainBlock(block.Block),
DAAWindow: daaWindow,
GHOSTDAGData: ghostdagData,
}
}
// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader
func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader {
return &externalapi.TrustedDataDataDAAHeader{
Header: BlockHeaderToDomainBlockHeader(daaBlock.Header),
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData),
}
}
// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair
func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair {
return &externalapi.BlockGHOSTDAGDataHashPair{
Hash: datum.Hash,
GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData),
}
}
func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData {
bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes))
for _, pair := range data.BluesAnticoneSizes {
bluesAnticoneSizes[*pair.BlueHash] = pair.AnticoneSize
}
return externalapi.NewBlockGHOSTDAGData(
data.BlueScore,
data.BlueWork,
data.SelectedParent,
data.MergeSetBlues,
data.MergeSetReds,
bluesAnticoneSizes,
)
}
func domainGHOSTDAGDataGHOSTDAGData(data *externalapi.BlockGHOSTDAGData) *BlockGHOSTDAGData {
bluesAnticoneSizes := make([]*BluesAnticoneSizes, 0, len(data.BluesAnticoneSizes()))
for blueHash, anticoneSize := range data.BluesAnticoneSizes() {
blueHashCopy := blueHash
bluesAnticoneSizes = append(bluesAnticoneSizes, &BluesAnticoneSizes{
BlueHash: &blueHashCopy,
AnticoneSize: anticoneSize,
})
}
return &BlockGHOSTDAGData{
BlueScore: data.BlueScore(),
BlueWork: data.BlueWork(),
SelectedParent: data.SelectedParent(),
MergeSetBlues: data.MergeSetBlues(),
MergeSetReds: data.MergeSetReds(),
BluesAnticoneSizes: bluesAnticoneSizes,
}
}
// DomainBlockWithTrustedDataToBlockWithTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData
func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWithTrustedData) *MsgBlockWithTrustedData {
daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow))
for i, daaBlock := range block.DAAWindow {
daaWindow[i] = &TrustedDataDataDAABlock{
Block: &MsgBlock{
Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header),
},
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
}
}
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData))
for i, datum := range block.GHOSTDAGData {
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
Hash: datum.Hash,
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
}
}
return &MsgBlockWithTrustedData{
Block: DomainBlockToMsgBlock(block.Block),
DAAScore: block.Block.Header.DAAScore(),
DAAWindow: daaWindow,
GHOSTDAGData: ghostdagData,
}
}
// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices
// to *MsgBlockWithTrustedDataV4
func DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 {
return &MsgBlockWithTrustedDataV4{
Block: DomainBlockToMsgBlock(block),
DAAWindowIndices: daaWindowIndices,
GHOSTDAGDataIndices: ghostdagDataIndices,
}
}
// DomainTrustedDataToTrustedData converts domain DAA-window and GHOSTDAG data to *MsgTrustedData
func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData {
daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow))
for i, daaBlock := range domainDAAWindow {
daaWindow[i] = &TrustedDataDAAHeader{
Header: DomainBlockHeaderToBlockHeader(daaBlock.Header),
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData),
}
}
ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData))
for i, datum := range domainGHOSTDAGData {
ghostdagData[i] = &BlockGHOSTDAGDataHashPair{
Hash: datum.Hash,
GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData),
}
}
return &MsgTrustedData{
DAAWindow: daaWindow,
GHOSTDAGData: ghostdagData,
}
}
// MsgPruningPointProofToDomainPruningPointProof converts *MsgPruningPointProof to *externalapi.PruningPointProof
func MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage *MsgPruningPointProof) *externalapi.PruningPointProof {
headers := make([][]externalapi.BlockHeader, len(pruningPointProofMessage.Headers))
for blockLevel, blockLevelParents := range pruningPointProofMessage.Headers {
headers[blockLevel] = make([]externalapi.BlockHeader, len(blockLevelParents))
for i, header := range blockLevelParents {
headers[blockLevel][i] = BlockHeaderToDomainBlockHeader(header)
}
}
return &externalapi.PruningPointProof{
Headers: headers,
}
}
// DomainPruningPointProofToMsgPruningPointProof converts *externalapi.PruningPointProof to *MsgPruningPointProof
func DomainPruningPointProofToMsgPruningPointProof(pruningPointProof *externalapi.PruningPointProof) *MsgPruningPointProof {
headers := make([][]*MsgBlockHeader, len(pruningPointProof.Headers))
for blockLevel, blockLevelParents := range pruningPointProof.Headers {
headers[blockLevel] = make([]*MsgBlockHeader, len(blockLevelParents))
for i, header := range blockLevelParents {
headers[blockLevel][i] = DomainBlockHeaderToBlockHeader(header)
}
}
return &MsgPruningPointProof{
Headers: headers,
}
}

View File

@@ -38,10 +38,6 @@ type RPCError struct {
Message string
}
func (err RPCError) Error() string {
return err.Message
}
// RPCErrorf formats according to a format specifier and returns the string
// as an RPCError.
func RPCErrorf(format string, args ...interface{}) *RPCError {

View File

@@ -45,30 +45,24 @@ const (
CmdRequestRelayBlocks
CmdInvTransaction
CmdRequestTransactions
CmdIBDBlock
CmdDoneHeaders
CmdTransactionNotFound
CmdReject
CmdHeader
CmdRequestNextHeaders
CmdRequestPruningPointUTXOSet
CmdRequestPruningPointUTXOSetAndBlock
CmdPruningPointUTXOSetChunk
CmdRequestIBDBlocks
CmdUnexpectedPruningPoint
CmdRequestPruningPointHash
CmdPruningPointHash
CmdIBDBlockLocator
CmdIBDBlockLocatorHighestHash
CmdIBDBlockLocatorHighestHashNotFound
CmdBlockHeaders
CmdRequestNextPruningPointUTXOSetChunk
CmdDonePruningPointUTXOSetChunks
CmdBlockWithTrustedData
CmdDoneBlocksWithTrustedData
CmdRequestPruningPointAndItsAnticone
CmdIBDBlock
CmdRequestIBDBlocks
CmdPruningPoints
CmdRequestPruningPointProof
CmdPruningPointProof
CmdReady
CmdTrustedData
CmdBlockWithTrustedDataV4
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -127,8 +121,6 @@ const (
CmdStopNotifyingUTXOsChangedResponseMessage
CmdGetUTXOsByAddressesRequestMessage
CmdGetUTXOsByAddressesResponseMessage
CmdGetBalanceByAddressRequestMessage
CmdGetBalanceByAddressResponseMessage
CmdGetVirtualSelectedParentBlueScoreRequestMessage
CmdGetVirtualSelectedParentBlueScoreResponseMessage
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
@@ -145,13 +137,9 @@ const (
CmdPruningPointUTXOSetOverrideNotificationMessage
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
CmdEstimateNetworkHashesPerSecondRequestMessage
CmdEstimateNetworkHashesPerSecondResponseMessage
CmdNotifyVirtualDaaScoreChangedRequestMessage
CmdNotifyVirtualDaaScoreChangedResponseMessage
CmdVirtualDaaScoreChangedNotificationMessage
CmdGetBalancesByAddressesRequestMessage
CmdGetBalancesByAddressesResponseMessage
)
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -160,7 +148,7 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "CmdRequestHeaders",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
@@ -171,30 +159,24 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestPruningPointUTXOSet: "RequestPruningPointUTXOSet",
CmdRequestPruningPointUTXOSetAndBlock: "RequestPruningPointUTXOSetAndBlock",
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
CmdRequestPruningPointHash: "RequestPruningPointHashHash",
CmdPruningPointHash: "PruningPointHash",
CmdIBDBlockLocator: "IBDBlockLocator",
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
CmdBlockHeaders: "BlockHeaders",
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
CmdBlockWithTrustedData: "BlockWithTrustedData",
CmdDoneBlocksWithTrustedData: "DoneBlocksWithTrustedData",
CmdRequestPruningPointAndItsAnticone: "RequestPruningPointAndItsAnticoneHeaders",
CmdIBDBlock: "IBDBlock",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdPruningPoints: "PruningPoints",
CmdRequestPruningPointProof: "RequestPruningPointProof",
CmdPruningPointProof: "PruningPointProof",
CmdReady: "Ready",
CmdTrustedData: "TrustedData",
CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -253,8 +235,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
CmdGetBalanceByAddressRequestMessage: "GetBalanceByAddressRequest",
CmdGetBalanceByAddressResponseMessage: "GetBalancesByAddressResponse",
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
@@ -271,13 +251,9 @@ var RPCMessageCommandToString = map[MessageCommand]string{
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
CmdEstimateNetworkHashesPerSecondRequestMessage: "EstimateNetworkHashesPerSecondRequest",
CmdEstimateNetworkHashesPerSecondResponseMessage: "EstimateNetworkHashesPerSecondResponse",
CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest",
CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest",
CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
}
// Message is an interface that describes a kaspa message. A type that

View File

@@ -18,21 +18,16 @@ import (
// TestBlock tests the MsgBlock API.
func TestBlock(t *testing.T) {
pver := uint32(4)
pver := ProtocolVersion
// Block 1 header.
parents := blockOne.Header.Parents
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
daaScore := blockOne.Header.DAAScore
blueScore := blockOne.Header.BlueScore
blueWork := blockOne.Header.BlueWork
pruningPoint := blockOne.Header.PruningPoint
bh := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce,
daaScore, blueScore, blueWork, pruningPoint)
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(5)
@@ -136,7 +131,7 @@ func TestConvertToPartial(t *testing.T) {
var blockOne = MsgBlock{
Header: MsgBlockHeader{
Version: 0,
Parents: []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}},
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,

View File

@@ -5,12 +5,13 @@
package appmessage
import (
"math/big"
"math"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
@@ -38,8 +39,8 @@ type MsgBlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version uint16
// Parents are the parent block hashes of the block in the DAG per superblock level.
Parents []externalapi.BlockLevelParents
// Hashes of the parent block headers in the blockDAG.
ParentHashes []*externalapi.DomainHash
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *externalapi.DomainHash
@@ -59,16 +60,15 @@ type MsgBlockHeader struct {
// Nonce used to generate the block.
Nonce uint64
}
// DAAScore is the DAA score of the block.
DAAScore uint64
BlueScore uint64
// BlueWork is the blue work of the block.
BlueWork *big.Int
PruningPoint *externalapi.DomainHash
// NumParentBlocks return the number of entries in ParentHashes
func (h *MsgBlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
}
// BlockHash computes the block identifier hash for the given block header.
@@ -76,27 +76,33 @@ func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
}
// IsGenesis returns true iff this block is a genesis block
func (h *MsgBlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (h *MsgBlockHeader) Command() MessageCommand {
return CmdHeader
}
// NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
// block with defaults or calculated values for the remaining fields.
func NewBlockHeader(version uint16, parents []externalapi.BlockLevelParents, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce,
daaScore, blueScore uint64, blueWork *big.Int, pruningPoint *externalapi.DomainHash) *MsgBlockHeader {
func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgBlockHeader{
Version: version,
Parents: parents,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
DAAScore: daaScore,
BlueScore: blueScore,
BlueWork: blueWork,
PruningPoint: pruningPoint,
}
}

View File

@@ -5,34 +5,29 @@
package appmessage
import (
"math/big"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
)
// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce := uint64(0xba4d87a69924a93d)
parents := []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}}
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
daaScore := uint64(123)
blueScore := uint64(456)
blueWork := big.NewInt(789)
pruningPoint := simnetGenesisHash
bh := NewBlockHeader(1, parents, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce,
daaScore, blueScore, blueWork, pruningPoint)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.Parents, parents) {
t.Errorf("NewBlockHeader: wrong parents - got %v, want %v",
spew.Sprint(bh.Parents), spew.Sprint(parents))
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if bh.HashMerkleRoot != merkleHash {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
@@ -46,20 +41,44 @@ func TestBlockHeader(t *testing.T) {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
if bh.DAAScore != daaScore {
t.Errorf("NewBlockHeader: wrong daaScore - got %v, want %v",
bh.DAAScore, daaScore)
}
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
if bh.BlueScore != blueScore {
t.Errorf("NewBlockHeader: wrong blueScore - got %v, want %v",
bh.BlueScore, blueScore)
genesisBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
if bh.BlueWork != blueWork {
t.Errorf("NewBlockHeader: wrong blueWork - got %v, want %v",
bh.BlueWork, blueWork)
tests := []struct {
in *MsgBlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
}
if !bh.PruningPoint.Equal(pruningPoint) {
t.Errorf("NewBlockHeader: wrong pruningPoint - got %v, want %v",
bh.PruningPoint, pruningPoint)
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
}
}

View File

@@ -1,54 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"math/big"
)
// MsgBlockWithTrustedData represents a kaspa BlockWithTrustedData message
type MsgBlockWithTrustedData struct {
baseMessage
Block *MsgBlock
DAAScore uint64
DAAWindow []*TrustedDataDataDAABlock
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
}
// Command returns the protocol command string for the message
func (msg *MsgBlockWithTrustedData) Command() MessageCommand {
return CmdBlockWithTrustedData
}
// NewMsgBlockWithTrustedData returns a new MsgBlockWithTrustedData.
func NewMsgBlockWithTrustedData() *MsgBlockWithTrustedData {
return &MsgBlockWithTrustedData{}
}
// TrustedDataDataDAABlock is an appmessage representation of externalapi.TrustedDataDataDAABlock
type TrustedDataDataDAABlock struct {
Block *MsgBlock
GHOSTDAGData *BlockGHOSTDAGData
}
// BlockGHOSTDAGData is an appmessage representation of externalapi.BlockGHOSTDAGData
type BlockGHOSTDAGData struct {
BlueScore uint64
BlueWork *big.Int
SelectedParent *externalapi.DomainHash
MergeSetBlues []*externalapi.DomainHash
MergeSetReds []*externalapi.DomainHash
BluesAnticoneSizes []*BluesAnticoneSizes
}
// BluesAnticoneSizes is an appmessage representation of the BluesAnticoneSizes part of GHOSTDAG data.
type BluesAnticoneSizes struct {
BlueHash *externalapi.DomainHash
AnticoneSize externalapi.KType
}
// BlockGHOSTDAGDataHashPair is an appmessage representation of externalapi.BlockGHOSTDAGDataHashPair
type BlockGHOSTDAGDataHashPair struct {
Hash *externalapi.DomainHash
GHOSTDAGData *BlockGHOSTDAGData
}

View File

@@ -1,20 +0,0 @@
package appmessage
// MsgBlockWithTrustedDataV4 represents a kaspa BlockWithTrustedDataV4 message
type MsgBlockWithTrustedDataV4 struct {
baseMessage
Block *MsgBlock
DAAWindowIndices []uint64
GHOSTDAGDataIndices []uint64
}
// Command returns the protocol command string for the message
func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand {
return CmdBlockWithTrustedDataV4
}
// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4.
func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
return &MsgBlockWithTrustedDataV4{}
}

View File

@@ -1,21 +0,0 @@
package appmessage
// MsgDoneBlocksWithTrustedData implements the Message interface and represents a kaspa
// DoneBlocksWithTrustedData message
//
// This message has no payload.
type MsgDoneBlocksWithTrustedData struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneBlocksWithTrustedData) Command() MessageCommand {
return CmdDoneBlocksWithTrustedData
}
// NewMsgDoneBlocksWithTrustedData returns a new kaspa DoneBlocksWithTrustedData message that conforms to the
// Message interface.
func NewMsgDoneBlocksWithTrustedData() *MsgDoneBlocksWithTrustedData {
return &MsgDoneBlocksWithTrustedData{}
}

View File

@@ -1,16 +0,0 @@
package appmessage
// MsgRequestPruningPointAndItsAnticone represents a kaspa RequestPruningPointAndItsAnticone message
type MsgRequestPruningPointAndItsAnticone struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointAndItsAnticone) Command() MessageCommand {
return CmdRequestPruningPointAndItsAnticone
}
// NewMsgRequestPruningPointAndItsAnticone returns a new MsgRequestPruningPointAndItsAnticone.
func NewMsgRequestPruningPointAndItsAnticone() *MsgRequestPruningPointAndItsAnticone {
return &MsgRequestPruningPointAndItsAnticone{}
}

View File

@@ -0,0 +1,65 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestIBDBlock tests the MsgIBDBlock API.
func TestIBDBlock(t *testing.T) {
pver := ProtocolVersion
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(15)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}
// Ensure we get the same block header data back out.
if !reflect.DeepEqual(&msg.Header, bh) {
t.Errorf("NewMsgIBDBlock: wrong block header - got %v, want %v",
spew.Sdump(&msg.Header), spew.Sdump(bh))
}
// Ensure transactions are added properly.
tx := blockOne.Transactions[0].Copy()
msg.AddTransaction(tx)
if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
spew.Sdump(msg.Transactions),
spew.Sdump(blockOne.Transactions))
}
// Ensure transactions are properly cleared.
msg.ClearTransactions()
if len(msg.Transactions) != 0 {
t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
len(msg.Transactions), 0)
}
}

View File

@@ -0,0 +1,23 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgPruningPointHashMessage represents a kaspa PruningPointHash message
type MsgPruningPointHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointHashMessage) Command() MessageCommand {
return CmdPruningPointHash
}
// NewPruningPointHashMessage returns a new kaspa PruningPointHash message
func NewPruningPointHashMessage(hash *externalapi.DomainHash) *MsgPruningPointHashMessage {
return &MsgPruningPointHashMessage{
Hash: hash,
}
}

View File

@@ -1,20 +0,0 @@
package appmessage
// MsgPruningPointProof represents a kaspa PruningPointProof message
type MsgPruningPointProof struct {
baseMessage
Headers [][]*MsgBlockHeader
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointProof) Command() MessageCommand {
return CmdPruningPointProof
}
// NewMsgPruningPointProof returns a new MsgPruningPointProof.
func NewMsgPruningPointProof(headers [][]*MsgBlockHeader) *MsgPruningPointProof {
return &MsgPruningPointProof{
Headers: headers,
}
}

View File

@@ -1,20 +0,0 @@
package appmessage
// MsgPruningPoints represents a kaspa PruningPoints message
type MsgPruningPoints struct {
baseMessage
Headers []*MsgBlockHeader
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPoints) Command() MessageCommand {
return CmdPruningPoints
}
// NewMsgPruningPoints returns a new MsgPruningPoints.
func NewMsgPruningPoints(headers []*MsgBlockHeader) *MsgPruningPoints {
return &MsgPruningPoints{
Headers: headers,
}
}

View File

@@ -10,6 +10,7 @@ import (
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
Limit uint32
}
@@ -23,8 +24,9 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
LowHash: lowHash,
HighHash: highHash,
Limit: limit,
}

View File

@@ -16,7 +16,7 @@ func TestRequestBlockLocator(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, 0)
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)

View File

@@ -10,7 +10,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
// TestRequstIBDBlocks tests the MsgRequestHeaders API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := externalapi.NewDomainHashFromString(hashStr)
@@ -27,14 +27,14 @@ func TestRequstIBDBlocks(t *testing.T) {
// Ensure we get the same data back out.
msg := NewMsgRequstHeaders(lowHash, highHash)
if !msg.HighHash.Equal(highHash) {
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
t.Errorf("NewMsgRequstHeaders: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
t.Errorf("NewMsgRequstHeaders: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -1,16 +0,0 @@
package appmessage
// MsgRequestPruningPointProof represents a kaspa RequestPruningPointProof message
type MsgRequestPruningPointProof struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointProof) Command() MessageCommand {
return CmdRequestPruningPointProof
}
// NewMsgRequestPruningPointProof returns a new MsgRequestPruningPointProof.
func NewMsgRequestPruningPointProof() *MsgRequestPruningPointProof {
return &MsgRequestPruningPointProof{}
}

View File

@@ -4,20 +4,20 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestPruningPointUTXOSet represents a kaspa RequestPruningPointUTXOSet message
type MsgRequestPruningPointUTXOSet struct {
// MsgRequestPruningPointUTXOSetAndBlock represents a kaspa RequestPruningPointUTXOSetAndBlock message
type MsgRequestPruningPointUTXOSetAndBlock struct {
baseMessage
PruningPointHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointUTXOSet) Command() MessageCommand {
return CmdRequestPruningPointUTXOSet
func (msg *MsgRequestPruningPointUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestPruningPointUTXOSetAndBlock
}
// NewMsgRequestPruningPointUTXOSet returns a new MsgRequestPruningPointUTXOSet
func NewMsgRequestPruningPointUTXOSet(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSet {
return &MsgRequestPruningPointUTXOSet{
// NewMsgRequestPruningPointUTXOSetAndBlock returns a new MsgRequestPruningPointUTXOSetAndBlock
func NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSetAndBlock {
return &MsgRequestPruningPointUTXOSetAndBlock{
PruningPointHash: pruningPointHash,
}
}

View File

@@ -1,25 +0,0 @@
package appmessage
// MsgTrustedData represents a kaspa TrustedData message
type MsgTrustedData struct {
baseMessage
DAAWindow []*TrustedDataDAAHeader
GHOSTDAGData []*BlockGHOSTDAGDataHashPair
}
// Command returns the protocol command string for the message
func (msg *MsgTrustedData) Command() MessageCommand {
return CmdTrustedData
}
// NewMsgTrustedData returns a new MsgTrustedData.
func NewMsgTrustedData() *MsgTrustedData {
return &MsgTrustedData{}
}
// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader
type TrustedDataDAAHeader struct {
Header *MsgBlockHeader
GHOSTDAGData *BlockGHOSTDAGData
}

View File

@@ -90,18 +90,16 @@ type TxIn struct {
PreviousOutpoint Outpoint
SignatureScript []byte
Sequence uint64
SigOpCount byte
}
// NewTxIn returns a new kaspa transaction input with the provided
// previous outpoint point and signature script with a default sequence of
// MaxTxInSequenceNum.
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64, sigOpCount byte) *TxIn {
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64) *TxIn {
return &TxIn{
PreviousOutpoint: *prevOut,
SignatureScript: signatureScript,
Sequence: sequence,
SigOpCount: sigOpCount,
}
}
@@ -208,7 +206,6 @@ func (msg *MsgTx) Copy() *MsgTx {
PreviousOutpoint: newOutpoint,
SignatureScript: newScript,
Sequence: oldTxIn.Sequence,
SigOpCount: oldTxIn.SigOpCount,
}
// Finally, append this fully copied txin.

View File

@@ -22,7 +22,7 @@ import (
// TestTx tests the MsgTx API.
func TestTx(t *testing.T) {
pver := uint32(4)
pver := ProtocolVersion
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := transactionid.FromString(txIDStr)
@@ -68,7 +68,7 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction input back out.
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum, 1)
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
spew.Sprint(&txIn.PreviousOutpoint),

View File

@@ -82,12 +82,12 @@ func (msg *MsgVersion) Command() MessageCommand {
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion {
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgVersion{
ProtocolVersion: protocolVersion,
ProtocolVersion: ProtocolVersion,
Network: network,
Services: 0,
Timestamp: mstime.Now(),

View File

@@ -15,7 +15,7 @@ import (
// TestVersion tests the MsgVersion API.
func TestVersion(t *testing.T) {
pver := uint32(4)
pver := ProtocolVersion
// Create version message data.
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
@@ -26,7 +26,7 @@ func TestVersion(t *testing.T) {
}
// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4)
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)

View File

@@ -5,9 +5,8 @@
package appmessage
import (
"net"
"github.com/kaspanet/kaspad/util/mstime"
"net"
)
// NetAddress defines information about a peer on the network including the time
@@ -58,7 +57,3 @@ func NewNetAddressTimestamp(
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
}
func (na NetAddress) String() string {
return na.TCPAddress().String()
}

View File

@@ -1,22 +0,0 @@
package appmessage
// MsgReady implements the Message interface and represents a kaspa
// Ready message. It is used to notify that the peer is ready to receive
// messages.
//
// This message has no payload.
type MsgReady struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgReady) Command() MessageCommand {
return CmdReady
}
// NewMsgReady returns a new kaspa Ready message that conforms to the
// Message interface.
func NewMsgReady() *MsgReady {
return &MsgReady{}
}

View File

@@ -0,0 +1,16 @@
package appmessage
// MsgRequestPruningPointHashMessage represents a kaspa RequestPruningPointHashMessage message
type MsgRequestPruningPointHashMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointHashMessage) Command() MessageCommand {
return CmdRequestPruningPointHash
}
// NewMsgRequestPruningPointHashMessage returns a new kaspa RequestPruningPointHash message
func NewMsgRequestPruningPointHashMessage() *MsgRequestPruningPointHashMessage {
return &MsgRequestPruningPointHashMessage{}
}

View File

@@ -11,6 +11,9 @@ import (
)
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF

View File

@@ -1,43 +0,0 @@
package appmessage
// EstimateNetworkHashesPerSecondRequestMessage is an appmessage corresponding to
// its respective RPC message
type EstimateNetworkHashesPerSecondRequestMessage struct {
baseMessage
StartHash string
WindowSize uint32
}
// Command returns the protocol command string for the message
func (msg *EstimateNetworkHashesPerSecondRequestMessage) Command() MessageCommand {
return CmdEstimateNetworkHashesPerSecondRequestMessage
}
// NewEstimateNetworkHashesPerSecondRequestMessage returns an instance of the message
func NewEstimateNetworkHashesPerSecondRequestMessage(startHash string, windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage {
return &EstimateNetworkHashesPerSecondRequestMessage{
StartHash: startHash,
WindowSize: windowSize,
}
}
// EstimateNetworkHashesPerSecondResponseMessage is an appmessage corresponding to
// its respective RPC message
type EstimateNetworkHashesPerSecondResponseMessage struct {
baseMessage
NetworkHashesPerSecond uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *EstimateNetworkHashesPerSecondResponseMessage) Command() MessageCommand {
return CmdEstimateNetworkHashesPerSecondResponseMessage
}
// NewEstimateNetworkHashesPerSecondResponseMessage returns an instance of the message
func NewEstimateNetworkHashesPerSecondResponseMessage(networkHashesPerSecond uint64) *EstimateNetworkHashesPerSecondResponseMessage {
return &EstimateNetworkHashesPerSecondResponseMessage{
NetworkHashesPerSecond: networkHashesPerSecond,
}
}
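
All of these RPC appmessages follow one shape: a request type, a response type carrying an optional *RPCError, and a Command() method keyed to a MessageCommand constant. A generic sketch of the pattern with a hypothetical Foo RPC (all names here are invented for illustration):

    type MessageCommand uint32

    const (
        CmdFooRequestMessage MessageCommand = iota
        CmdFooResponseMessage
    )

    type RPCError struct{ Message string }

    // FooRequestMessage / FooResponseMessage are hypothetical, mirroring
    // the request/response pairs shown above.
    type FooRequestMessage struct {
        Param string
    }

    func (msg *FooRequestMessage) Command() MessageCommand {
        return CmdFooRequestMessage
    }

    type FooResponseMessage struct {
        Result uint64
        Error  *RPCError
    }

    func (msg *FooResponseMessage) Command() MessageCommand {
        return CmdFooResponseMessage
    }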

View File

@@ -1,41 +0,0 @@
package appmessage
// GetBalanceByAddressRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressRequestMessage struct {
baseMessage
Address string
}
// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand {
return CmdGetBalanceByAddressRequestMessage
}
// NewGetBalanceByAddressRequest returns an instance of the message
func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage {
return &GetBalanceByAddressRequestMessage{
Address: address,
}
}
// GetBalanceByAddressResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalanceByAddressResponseMessage struct {
baseMessage
Balance uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand {
return CmdGetBalanceByAddressResponseMessage
}
// NewGetBalanceByAddressResponse returns an instance of the message
func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage {
return &GetBalanceByAddressResponseMessage{
Balance: Balance,
}
}

View File

@@ -1,47 +0,0 @@
package appmessage
// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesRequestMessage
}
// NewGetBalancesByAddressesRequest returns an instance of the message
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
return &GetBalancesByAddressesRequestMessage{
Addresses: addresses,
}
}
// BalancesByAddressesEntry represents the balance of some address
type BalancesByAddressesEntry struct {
Address string
Balance uint64
}
// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesResponseMessage struct {
baseMessage
Entries []*BalancesByAddressesEntry
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
return CmdGetBalancesByAddressesResponseMessage
}
// NewGetBalancesByAddressesResponse returns an instance of the message
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
return &GetBalancesByAddressesResponseMessage{
Entries: entries,
}
}

View File

@@ -4,8 +4,8 @@ package appmessage
// its respective RPC message
type GetBlockRequestMessage struct {
baseMessage
Hash string
IncludeTransactions bool
Hash string
IncludeTransactionVerboseData bool
}
// Command returns the protocol command string for the message
@@ -14,10 +14,10 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
}
// NewGetBlockRequestMessage returns an instance of the message
func NewGetBlockRequestMessage(hash string, includeTransactions bool) *GetBlockRequestMessage {
func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
return &GetBlockRequestMessage{
Hash: hash,
IncludeTransactions: includeTransactions,
Hash: hash,
IncludeTransactionVerboseData: includeTransactionVerboseData,
}
}

View File

@@ -4,9 +4,9 @@ package appmessage
// its respective RPC message
type GetBlocksRequestMessage struct {
baseMessage
LowHash string
IncludeBlocks bool
IncludeTransactions bool
LowHash string
IncludeBlocks bool
IncludeTransactionVerboseData bool
}
// Command returns the protocol command string for the message
@@ -16,11 +16,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
// NewGetBlocksRequestMessage returns an instance of the message
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
includeTransactions bool) *GetBlocksRequestMessage {
includeTransactionVerboseData bool) *GetBlocksRequestMessage {
return &GetBlocksRequestMessage{
LowHash: lowHash,
IncludeBlocks: includeBlocks,
IncludeTransactions: includeTransactions,
LowHash: lowHash,
IncludeBlocks: includeBlocks,
IncludeTransactionVerboseData: includeTransactionVerboseData,
}
}

View File

@@ -20,9 +20,8 @@ func NewGetInfoRequestMessage() *GetInfoRequestMessage {
// its respective RPC message
type GetInfoResponseMessage struct {
baseMessage
P2PID string
MempoolSize uint64
ServerVersion string
P2PID string
MempoolSize uint64
Error *RPCError
}
@@ -33,10 +32,9 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
}
// NewGetInfoResponseMessage returns an instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64) *GetInfoResponseMessage {
return &GetInfoResponseMessage{
P2PID: p2pID,
MempoolSize: mempoolSize,
ServerVersion: serverVersion,
P2PID: p2pID,
MempoolSize: mempoolSize,
}
}

View File

@@ -24,7 +24,7 @@ func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
Error *RPCError
}
@@ -35,11 +35,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess
}
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *GetVirtualSelectedParentChainFromBlockResponseMessage {
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -38,7 +38,19 @@ func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualS
type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}
// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}
// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTransactionIDs []string
}
// Command returns the protocol command string for the message
@@ -47,11 +59,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa
}
// NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *VirtualSelectedParentChainChangedNotificationMessage {
return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlockHashes: addedChainBlocks,
AddedChainBlocks: addedChainBlocks,
}
}
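
A construction sketch for the richer notification, assuming the ChainBlock and AcceptedBlock declarations above are in scope (all hash strings are placeholders):

    added := []*ChainBlock{{
        Hash: "chainBlockHashPlaceholder",
        AcceptedBlocks: []*AcceptedBlock{{
            Hash:                   "acceptedBlockHashPlaceholder",
            AcceptedTransactionIDs: []string{"txID1", "txID2"},
        }},
    }}
    removed := []string{"removedChainBlockHashPlaceholder"}
    notification := NewVirtualSelectedParentChainChangedNotificationMessage(removed, added)
    _ = notification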

View File

@@ -4,8 +4,7 @@ package appmessage
// its respective RPC message
type SubmitBlockRequestMessage struct {
baseMessage
Block *RPCBlock
AllowNonDAABlocks bool
Block *RPCBlock
}
// Command returns the protocol command string for the message
@@ -14,10 +13,9 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
}
// NewSubmitBlockRequestMessage returns an instance of the message
func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage {
func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
return &SubmitBlockRequestMessage{
Block: block,
AllowNonDAABlocks: allowNonDAABlocks,
Block: block,
}
}
@@ -55,7 +53,7 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
return CmdSubmitBlockResponseMessage
}
// NewSubmitBlockResponseMessage returns an instance of the message
// NewSubmitBlockResponseMessage returns a instance of the message
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
return &SubmitBlockResponseMessage{}
}
@@ -72,34 +70,22 @@ type RPCBlock struct {
// used over RPC
type RPCBlockHeader struct {
Version uint32
Parents []*RPCBlockLevelParents
ParentHashes []string
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Timestamp int64
Bits uint32
Nonce uint64
DAAScore uint64
BlueScore uint64
BlueWork string
PruningPoint string
}
// RPCBlockLevelParents holds parent hashes for one block level
type RPCBlockLevelParents struct {
ParentHashes []string
}
// RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct {
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
MergeSetBluesHashes []string
MergeSetRedsHashes []string
IsChainBlock bool
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
}
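
This header change swaps a flat ParentHashes list for per-level Parents. A conversion sketch, assuming the RPCBlockLevelParents type from the diff above, where level 0 holds the direct parents, which is what the flat field used to carry:

    // flattenParents recovers the old single-level representation from the
    // multi-level one (sketch only; returns nil for a parentless block).
    func flattenParents(parents []*RPCBlockLevelParents) []string {
        if len(parents) == 0 {
            return nil
        }
        return parents[0].ParentHashes
    }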

View File

@@ -5,7 +5,6 @@ package appmessage
type SubmitTransactionRequestMessage struct {
baseMessage
Transaction *RPCTransaction
AllowOrphan bool
}
// Command returns the protocol command string for the message
@@ -14,10 +13,9 @@ func (msg *SubmitTransactionRequestMessage) Command() MessageCommand {
}
// NewSubmitTransactionRequestMessage returns an instance of the message
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction, allowOrphan bool) *SubmitTransactionRequestMessage {
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTransactionRequestMessage {
return &SubmitTransactionRequestMessage{
Transaction: transaction,
AllowOrphan: allowOrphan,
}
}
@@ -61,7 +59,6 @@ type RPCTransactionInput struct {
PreviousOutpoint *RPCOutpoint
SignatureScript string
Sequence uint64
SigOpCount byte
VerboseData *RPCTransactionInputVerboseData
}
@@ -99,7 +96,7 @@ type RPCUTXOEntry struct {
type RPCTransactionVerboseData struct {
TransactionID string
Hash string
Mass uint64
Size uint64
BlockHash string
BlockTime uint64
}

View File

@@ -4,8 +4,7 @@ import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/app/rpc"
"github.com/kaspanet/kaspad/domain"
@@ -15,6 +14,7 @@ import (
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/panics"
@@ -46,6 +46,8 @@ func (a *ComponentManager) Start() {
panics.Exit(log, fmt.Sprintf("Error starting the net adapter: %+v", err))
}
a.maybeSeedFromDNS()
a.connectionManager.Start()
}
@@ -81,11 +83,8 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
IsArchival: cfg.IsArchivalNode,
EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet,
}
mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params)
mempoolConfig.MaximumOrphanTransactionCount = cfg.MaxOrphanTxs
mempoolConfig.MinimumRelayTransactionFee = cfg.MinRelayTxFee
domain, err := domain.New(&consensusConfig, mempoolConfig, db)
domain, err := domain.New(&consensusConfig, db)
if err != nil {
return nil, err
}
@@ -102,7 +101,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
var utxoIndex *utxoindex.UTXOIndex
if cfg.UTXOIndex {
utxoIndex, err = utxoindex.New(domain, db)
utxoIndex, err = utxoindex.New(domain.Consensus(), db)
if err != nil {
return nil, err
}
@@ -152,13 +151,29 @@ func setupRPC(
utxoIndex,
shutDownChan,
)
protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
return rpcManager
}
func (a *ComponentManager) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, false, nil,
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
// IPs of nodes and not its own IP, we cannot know the real IP of the
// source, so we'll take the first returned address as the source.
a.addressManager.AddAddresses(addresses...)
})
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, false, nil,
func(addresses []*appmessage.NetAddress) {
a.addressManager.AddAddresses(addresses...)
})
}
}
// P2PNodeID returns the network ID associated with this ComponentManager
func (a *ComponentManager) P2PNodeID() *id.ID {
return a.netAdapter.ID()

View File

@@ -1,8 +1,6 @@
package common
import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"time"
"github.com/pkg/errors"
@@ -10,18 +8,7 @@ import (
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 120 * time.Second
const DefaultTimeout = 30 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")
type flowExecuteFunc func(peer *peerpkg.Peer)
// Flow is a data structure that is used to associate a p2p flow with some route in a router.
type Flow struct {
Name string
ExecuteFunc flowExecuteFunc
}
// FlowInitializeFunc is a function that is used in order to initialize a flow
type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error

View File

@@ -1,23 +1,23 @@
package flowcontext
import (
"time"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
)
// OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcasts
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
virtualChangeSet *externalapi.VirtualChangeSet) error {
blockInsertionResult *externalapi.BlockInsertionResult) error {
hash := consensushashing.BlockHash(block)
log.Debugf("OnNewBlock start for block %s", hash)
@@ -31,10 +31,10 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
newBlocks := []*externalapi.DomainBlock{block}
newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
}
allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
@@ -48,8 +48,8 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
virtualChangeSet = newVirtualChangeSets[i]
err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
blockInsertionResult = newBlockInsertionResults[i]
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
if err != nil {
return err
}
@@ -59,15 +59,6 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
}
// OnVirtualChange calls the handler function whenever the virtual block changes.
func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
return f.onVirtualChangeHandler(virtualChangeSet)
}
return nil
}
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
@@ -80,6 +71,8 @@ func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
f.updateTransactionsToRebroadcast(addedBlocks)
// Don't relay transactions when in IBD.
if f.IsIBDRunning() {
return nil
@@ -87,12 +80,7 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
var txIDsToRebroadcast []*externalapi.DomainTransactionID
if f.shouldRebroadcastTransactions() {
txsToRebroadcast, err := f.Domain().MiningManager().RevalidateHighPriorityTransactions()
if err != nil {
return err
}
txIDsToRebroadcast = consensushashing.TransactionIDs(txsToRebroadcast)
f.lastRebroadcastTime = time.Now()
txIDsToRebroadcast = f.txIDsToRebroadcast()
}
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
@@ -103,12 +91,20 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
for i, txID := range txIDsToRebroadcast {
txIDsToBroadcast[offset+i] = txID
}
return f.EnqueueTransactionIDsForPropagation(txIDsToBroadcast)
if len(txIDsToBroadcast) == 0 {
return nil
}
if len(txIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
txIDsToBroadcast = txIDsToBroadcast[:appmessage.MaxInvPerTxInvMsg]
}
inv := appmessage.NewMsgInvTransaction(txIDsToBroadcast)
return f.Broadcast(inv)
}
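
Note that the branch above caps txIDsToBroadcast at appmessage.MaxInvPerTxInvMsg and silently drops the overflow. A chunking sketch (hypothetical helper, not part of the diff) that would relay every ID across several inv messages instead:

    // broadcastInChunks splits the IDs into inv messages of at most
    // maxPerMessage entries each (sketch only).
    func broadcastInChunks(broadcast func(appmessage.Message) error,
        ids []*externalapi.DomainTransactionID, maxPerMessage int) error {

        for len(ids) > 0 {
            chunk := ids
            if len(chunk) > maxPerMessage {
                chunk = ids[:maxPerMessage]
            }
            if err := broadcast(appmessage.NewMsgInvTransaction(chunk)); err != nil {
                return err
            }
            ids = ids[len(chunk):]
        }
        return nil
    }
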
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks {
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
return f.sharedRequestedBlocks
}
@@ -118,14 +114,14 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
return protocolerrors.Errorf(false, "cannot add header only block")
}
virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
}
return err
}
err = f.OnNewBlock(block, virtualChangeSet)
err = f.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
@@ -165,6 +161,7 @@ func (f *FlowContext) UnsetIBDRunning() {
}
f.ibdPeer = nil
log.Infof("IBD finished")
}
// IBDPeer returns the current IBD peer or nil if the node is not

View File

@@ -29,8 +29,3 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
errChan <- err
}
}
// IsRecoverableError returns whether the error is recoverable
func (*FlowContext) IsRecoverableError(err error) bool {
return err == nil || errors.Is(err, router.ErrRouteClosed) || errors.As(err, &protocolerrors.ProtocolError{})
}

View File

@@ -1,15 +1,16 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
@@ -20,10 +21,7 @@ import (
// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
// OnPruningPointUTXOSetOverrideHandler is a handler function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.
@@ -44,15 +42,16 @@ type FlowContext struct {
timeStarted int64
onVirtualChangeHandler OnVirtualChangeHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
lastRebroadcastTime time.Time
sharedRequestedTransactions *SharedRequestedTransactions
transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
lastRebroadcastTime time.Time
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
sharedRequestedBlocks *SharedRequestedBlocks
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex
@@ -63,10 +62,6 @@ type FlowContext struct {
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
orphansMutex sync.RWMutex
transactionIDsToPropagate []*externalapi.DomainTransactionID
lastTransactionIDPropagationTime time.Time
transactionIDPropagationLock sync.Mutex
shutdownChan chan struct{}
}
@@ -75,19 +70,18 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {
return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
domain: domain,
addressManager: addressManager,
connectionManager: connectionManager,
sharedRequestedTransactions: NewSharedRequestedTransactions(),
sharedRequestedBlocks: NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
transactionIDsToPropagate: []*externalapi.DomainTransactionID{},
lastTransactionIDPropagationTime: time.Now(),
shutdownChan: make(chan struct{}),
cfg: cfg,
netAdapter: netAdapter,
domain: domain,
addressManager: addressManager,
connectionManager: connectionManager,
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
shutdownChan: make(chan struct{}),
}
}
@@ -102,11 +96,6 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan
}
// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
f.onVirtualChangeHandler = onVirtualChangeHandler
}
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler

View File

@@ -17,8 +17,8 @@ const maxOrphans = 600
// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
virtualChangeSet *externalapi.VirtualChangeSet
block *externalapi.DomainBlock
blockInsertionResult *externalapi.BlockInsertionResult
}
// AddOrphan adds the block to the orphan set
@@ -73,10 +73,10 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
orphanBlock := f.orphans[orphanHash]
log.Debugf("Considering to unorphan block %s with parents %s",
orphanHash, orphanBlock.Header.DirectParents())
orphanHash, orphanBlock.Header.ParentHashes())
canBeUnorphaned := true
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
if err != nil {
return nil, err
@@ -90,14 +90,14 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
}
}
if canBeUnorphaned {
virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
virtualChangeSet: virtualChangeSet,
block: orphanBlock,
blockInsertionResult: blockInsertionResult,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
@@ -133,7 +133,7 @@ func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.Domai
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
var childOrphans []externalapi.DomainHash
for orphanHash, orphanBlock := range f.orphans {
for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() {
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
if orphanBlockParentHash.Equal(blockHash) {
childOrphans = append(childOrphans, orphanHash)
break
@@ -143,14 +143,14 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
return childOrphans
}
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)
virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
@@ -160,7 +160,7 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externa
}
log.Infof("Unorphaned block %s", orphanHash)
return virtualChangeSet, true, nil
return blockInsertionResult, true, nil
}
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@@ -201,7 +201,7 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa
continue
}
for _, parent := range block.Header.DirectParents() {
for _, parent := range block.Header.ParentHashes() {
if !addedToQueueSet.Contains(parent) {
queue = append(queue, parent)
addedToQueueSet.Add(parent)
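
The orphan-root search above is a breadth-first walk over parent hashes, collecting the ancestors the node is still missing. A self-contained generic sketch, with strings standing in for DomainHash:

    // orphanRoots walks the DAG breadth-first from start, returning the
    // hashes that are not yet known locally (generic sketch).
    func orphanRoots(start string, parentsOf func(string) []string, known func(string) bool) []string {
        var roots []string
        enqueued := map[string]bool{start: true}
        queue := []string{start}
        for len(queue) > 0 {
            current := queue[0]
            queue = queue[1:]
            if !known(current) {
                // A missing block is a root: we cannot see past it.
                roots = append(roots, current)
                continue
            }
            for _, parent := range parentsOf(current) {
                if !enqueued[parent] {
                    enqueued[parent] = true
                    queue = append(queue, parent)
                }
            }
        }
        return roots
    }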

View File

@@ -25,10 +25,6 @@ func (f *FlowContext) ShouldMine() (bool, error) {
return false, err
}
if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
return false, nil
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err

View File

@@ -4,22 +4,39 @@ import (
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)
// TransactionIDPropagationInterval is the interval between transaction ID propagations
const TransactionIDPropagationInterval = 500 * time.Millisecond
// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error {
acceptedTransactions, err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, true, allowOrphan)
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, false)
if err != nil {
return err
}
acceptedTransactionIDs := consensushashing.TransactionIDs(acceptedTransactions)
return f.EnqueueTransactionIDsForPropagation(acceptedTransactionIDs)
transactionID := consensushashing.TransactionID(tx)
f.transactionsToRebroadcast[*transactionID] = tx
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
return f.Broadcast(inv)
}
func (f *FlowContext) updateTransactionsToRebroadcast(addedBlocks []*externalapi.DomainBlock) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
for _, block := range addedBlocks {
// Note: if a transaction is included in the DAG but not accepted,
// it won't be rebroadcast anymore, even though it is not included in
// the UTXO set.
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
}
}
}
func (f *FlowContext) shouldRebroadcastTransactions() bool {
@@ -27,9 +44,22 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}
func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = consensushashing.TransactionID(tx)
i++
}
return txIDs
}
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions {
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}
@@ -40,42 +70,3 @@ func (f *FlowContext) OnTransactionAddedToMempool() {
f.onTransactionAddedToMempoolHandler()
}
}
// EnqueueTransactionIDsForPropagation adds the given transaction IDs to a set of IDs to
// propagate. The IDs will be broadcast to all peers within a single transaction Inv message.
// The broadcast itself may happen only during a subsequent call to this method.
func (f *FlowContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
f.transactionIDPropagationLock.Lock()
defer f.transactionIDPropagationLock.Unlock()
f.transactionIDsToPropagate = append(f.transactionIDsToPropagate, transactionIDs...)
return f.maybePropagateTransactions()
}
func (f *FlowContext) maybePropagateTransactions() error {
if time.Since(f.lastTransactionIDPropagationTime) < TransactionIDPropagationInterval &&
len(f.transactionIDsToPropagate) < appmessage.MaxInvPerTxInvMsg {
return nil
}
for len(f.transactionIDsToPropagate) > 0 {
transactionIDsToBroadcast := f.transactionIDsToPropagate
if len(transactionIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
// Cap each broadcast at the maximum inv message size; the remainder
// is sent on the next iteration of this loop.
transactionIDsToBroadcast = f.transactionIDsToPropagate[:appmessage.MaxInvPerTxInvMsg]
}
log.Debugf("Transaction propagation: broadcasting %d transactions", len(transactionIDsToBroadcast))
inv := appmessage.NewMsgInvTransaction(transactionIDsToBroadcast)
err := f.Broadcast(inv)
if err != nil {
return err
}
f.transactionIDsToPropagate = f.transactionIDsToPropagate[len(transactionIDsToBroadcast):]
}
f.lastTransactionIDPropagationTime = time.Now()
return nil
}
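
The flush rule in maybePropagateTransactions reduces to one condition: propagate once the interval has elapsed, or once a full inv message worth of IDs has accumulated. Condensed as a predicate (a sketch, with the constants as in the code above):

    // shouldFlush mirrors the inverse of the early-return guard above.
    func shouldFlush(sinceLastPropagation time.Duration, pendingIDs int) bool {
        return sinceLastPropagation >= TransactionIDPropagationInterval ||
            pendingIDs >= appmessage.MaxInvPerTxInvMsg
    }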

View File

@@ -0,0 +1,29 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}
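
For context, a block locator is conventionally a list of hashes spaced exponentially from the high block back toward the low one, letting the receiver find the highest shared block in logarithmically few entries. An index-based sketch under that assumption, not the consensus implementation:

    // buildLocator walks a chain (lowest block first) from the tip backward,
    // doubling the step once the first ten entries are collected.
    func buildLocator(chain []string) []string {
        var locator []string
        step := 1
        for i := len(chain) - 1; i >= 0; i -= step {
            locator = append(locator, chain[i])
            if len(locator) > 10 {
                step *= 2
            }
        }
        // Always include the lowest block so the ranges overlap.
        if len(chain) > 0 && locator[len(locator)-1] != chain[0] {
            locator = append(locator, chain[0])
        }
        return locator
    }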

View File

@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -45,9 +44,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil {
return err
}
// The IBD block locator considers only blocks that exist and have bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.Exists {
continue
}

View File

@@ -48,7 +48,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
log.Debugf("sent %d out of %d", i, len(msgRequestIBDBlocks.Hashes))
}
}
}

View File

@@ -0,0 +1,51 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandlePruningPointHashRequestsFlowContext is the interface for the context needed for the handlePruningPointHashRequestsFlow flow.
type HandlePruningPointHashRequestsFlowContext interface {
Domain() domain.Domain
}
type handlePruningPointHashRequestsFlow struct {
HandlePruningPointHashRequestsFlowContext
incomingRoute, outgoingRoute *router.Route
}
// HandlePruningPointHashRequests listens to appmessage.MsgRequestPruningPointHashMessage messages and sends
// the pruning point hash as response.
func HandlePruningPointHashRequests(context HandlePruningPointHashRequestsFlowContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handlePruningPointHashRequestsFlow{
HandlePruningPointHashRequestsFlowContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handlePruningPointHashRequestsFlow) start() error {
for {
_, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for a pruning point hash")
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewPruningPointHashMessage(pruningPoint))
if err != nil {
return err
}
log.Debugf("Sent pruning point hash %s", pruningPoint)
}
}

View File

@@ -3,7 +3,6 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
@@ -24,16 +23,16 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
SharedRequestedBlocks() *SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
IsRecoverableError(err error) bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
}
type handleRelayInvsFlow struct {
@@ -55,10 +54,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
err := flow.start()
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
close(peer.IBDRequestChannel())
return err
return flow.start()
}
func (flow *handleRelayInvsFlow) start() error {
@@ -84,18 +80,7 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if flow.IsOrphan(inv.Hash) {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", inv.Hash)
continue
}
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil {
@@ -125,13 +110,8 @@ func (flow *handleRelayInvsFlow) start() error {
return err
}
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
continue
}
log.Debugf("Processing block %s", inv.Hash)
missingParents, virtualChangeSet, err := flow.processBlock(block)
missingParents, blockInsertionResult, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
@@ -146,7 +126,7 @@ func (flow *handleRelayInvsFlow) start() error {
}
if len(missingParents) > 0 {
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
err := flow.processOrphan(block)
err := flow.processOrphan(block, missingParents)
if err != nil {
return err
}
@@ -159,7 +139,7 @@ func (flow *handleRelayInvsFlow) start() error {
return err
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, virtualChangeSet)
err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
@@ -196,14 +176,14 @@ func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error)
}
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
if exists {
return nil, true, nil
}
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().Remove(requestHash)
defer flow.SharedRequestedBlocks().remove(requestHash)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
@@ -246,9 +226,9 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
}
}
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
blockHash := consensushashing.BlockHash(block)
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
@@ -261,7 +241,7 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
return nil, virtualChangeSet, nil
return nil, blockInsertionResult, nil
}
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
@@ -269,7 +249,7 @@ func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) erro
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) error {
blockHash := consensushashing.BlockHash(block)
// Return if the block has been orphaned from elsewhere already
@@ -284,19 +264,6 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
return err
}
if isBlockInOrphanResolutionRange {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", blockHash)
return nil
}
}
log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set", blockHash)
flow.AddOrphan(block)
@@ -307,28 +274,7 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) e
// Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash)
// Send the block to IBD flow via the IBDRequestChannel.
// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
select {
case flow.peer.IBDRequestChannel() <- block:
default:
}
return nil
}
func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
parents := block.Header.DirectParents()
return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
return flow.runIBDIfNotRunning(blockHash)
}
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
@@ -337,7 +283,8 @@ func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
lowHash := flow.Config().ActiveNetParams.GenesisHash
err := flow.sendGetBlockLocator(lowHash, blockHash, orphanResolutionRange)
if err != nil {
return false, err
}

View File

@@ -32,19 +32,20 @@ func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
highHash, limit, err := flow.receiveGetBlockLocator()
lowHash, highHash, limit, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)
log.Debugf("Received getBlockLocator with lowHash: %s, highHash: %s, limit: %d",
lowHash, highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
if err != nil || len(locator) == 0 {
if err != nil {
log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
log.Debugf("Received error from CreateBlockLocator: %s", err)
}
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between the pruning point and %s", highHash)
"locator between blocks %s and %s", lowHash, highHash)
}
err = flow.sendBlockLocator(locator)
@@ -54,15 +55,16 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {

View File

@@ -12,26 +12,26 @@ import (
const ibdBatchSize = router.DefaultMaxMessages
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestIBDBlocksContext interface {
Domain() domain.Domain
}
type handleRequestHeadersFlow struct {
RequestHeadersContext
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestHeadersFlow{
RequestHeadersContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}

View File

@@ -0,0 +1,144 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestPruningPointUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestPruningPointUTXOSetAndBlock flow.
type HandleRequestPruningPointUTXOSetAndBlockContext interface {
Domain() domain.Domain
}
type handleRequestPruningPointUTXOSetAndBlockFlow struct {
HandleRequestPruningPointUTXOSetAndBlockContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestPruningPointUTXOSetAndBlock listens to appmessage.MsgRequestPruningPointUTXOSetAndBlock messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSetAndBlock(context HandleRequestPruningPointUTXOSetAndBlockContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestPruningPointUTXOSetAndBlockFlow{
HandleRequestPruningPointUTXOSetAndBlockContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) start() error {
for {
msgRequestPruningPointUTXOSetAndBlock, err := flow.waitForRequestPruningPointUTXOSetAndBlockMessages()
if err != nil {
return err
}
err = flow.handleRequestPruningPointUTXOSetAndBlockMessage(msgRequestPruningPointUTXOSetAndBlock)
if err != nil {
return err
}
}
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) handleRequestPruningPointUTXOSetAndBlockMessage(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetAndBlockFlow")
defer onEnd()
log.Debugf("Got request for PruningPointHash UTXOSet and Block")
err := flow.sendPruningPointBlock(msgRequestPruningPointUTXOSetAndBlock)
if err != nil {
return err
}
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSetAndBlock)
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) waitForRequestPruningPointUTXOSetAndBlockMessages() (
*appmessage.MsgRequestPruningPointUTXOSetAndBlock, error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSetAndBlock, ok := message.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSetAndBlock, message.Command())
}
return msgRequestPruningPointUTXOSetAndBlock, nil
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointBlock(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
block, err := flow.Domain().Consensus().GetBlock(msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
if err != nil {
return err
}
log.Debugf("Retrieved pruning block %s", msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(block)))
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
// Propagate any other error instead of continuing with a nil UTXO set.
return err
}
log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}
if len(pruningPointUTXOs) < step {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
}
}
}
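
The receiving side of this flow mirrors it: consume chunk messages, acknowledge every ibdBatchSize chunks so the sender keeps going, and stop on the done message. A sketch assuming the appmessage types above and kaspad-style route helpers; the constructor and type names here are assumptions based on the sender code:

    func receivePruningPointUTXOSet(incomingRoute, outgoingRoute *router.Route) error {
        chunksReceived := 0
        for {
            message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
            if err != nil {
                return err
            }
            switch msg := message.(type) {
            case *appmessage.MsgPruningPointUTXOSetChunk:
                // Process the chunk (application-specific; elided here).
                _ = msg
                chunksReceived++
                // Mirror the sender's flow control: ask for more every
                // ibdBatchSize chunks.
                if chunksReceived%ibdBatchSize == 0 {
                    err := outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointUTXOSetChunk())
                    if err != nil {
                        return err
                    }
                }
            case *appmessage.MsgDonePruningPointUTXOSetChunks:
                return nil
            default:
                return protocolerrors.Errorf(true, "received unexpected message type %s", message.Command())
            }
        }
    }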

View File

@@ -1,181 +1,100 @@
package blockrelay
import (
"fmt"
"time"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
"time"
)
// IBDContext is the interface for the context needed for the HandleIBD flow.
type IBDContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
OnPruningPointUTXOSetOverride() error
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
IsRecoverableError(err error) bool
}
type handleIBDFlow struct {
IBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD handles IBD
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
IBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
// Wait for IBD requests triggered by other flows
block, ok := <-flow.peer.IBDRequestChannel()
if !ok {
return nil
}
err := flow.runIBDIfNotRunning(block)
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
highHash := consensushashing.BlockHash(block)
// Temp code to avoid IBD being triggered by lagging nodes publishing their side-chains
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err == nil {
virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
if err == nil {
if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
var virtualSub, difficultyMul big.Int
if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
log.Criticalf("Avoiding IBD triggered by relay %s because it is coming from " +
"a deep (%d DAA score depth) side-chain which has much lower blue work (%d, %d)",
highHash,
virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
return nil
}
}
}
}
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
defer flow.UnsetIBDRunning()
isFinishedSuccessfully := false
defer func() {
flow.UnsetIBDRunning()
flow.logIBDFinished(isFinishedSuccessfully)
}()
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Criticalf("Syncing blocks up to %s", highHash)
log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
log.Debugf("Syncing headers up to %s", highHash)
headersSynced, err := flow.syncHeaders(highHash)
if err != nil {
return err
}
log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {
return err
}
if !shouldSync {
if !headersSynced {
log.Debugf("Aborting IBD because the headers failed to sync")
return nil
}
log.Debugf("Finished syncing headers up to %s", highHash)
if shouldDownloadHeadersProof {
log.Infof("Starting IBD with headers proof")
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
if err != nil {
return err
}
} else {
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
if err != nil {
return err
}
if isGenesisVirtualSelectedParent {
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
"to the recent pruning point before normal operation can resume.", highHash)
return nil
}
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
if err != nil {
return err
}
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet()
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
log.Debugf("Downloading block bodies up to %s", highHash)
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}
log.Debugf("Finished downloading block bodies up to %s", highHash)
log.Debugf("Finished syncing blocks up to %s", highHash)
isFinishedSuccessfully = true
return nil
}
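
The guard near the top of runIBDIfNotRunning skips IBD triggered from deep side-chains: it bails out only when the relayed block is more than 2641 DAA-score units behind the virtual and the virtual's blue-work lead exceeds 180 times the virtual's current difficulty. A distilled sketch of just that comparison, with the constants copied from the code above and everything else illustrative:

package main

import (
	"fmt"
	"math/big"
)

// shouldSkipIBD reports whether a relayed block is too deep and too light
// to justify IBD: more than 2641 DAA-score units behind the virtual, with
// a blue-work gap larger than 180 times the virtual's difficulty.
func shouldSkipIBD(virtualDAAScore, blockDAAScore uint64,
	virtualDifficulty, virtualBlueWork, blockBlueWork *big.Int) bool {
	if virtualDAAScore <= blockDAAScore+2641 {
		return false // not deep enough to matter
	}
	lead := new(big.Int).Sub(virtualBlueWork, blockBlueWork)
	threshold := new(big.Int).Mul(virtualDifficulty, big.NewInt(180))
	return threshold.Cmp(lead) < 0 // skip when the lead exceeds the threshold
}

func main() {
	fmt.Println(shouldSkipIBD(10000, 1000,
		big.NewInt(5), big.NewInt(2000), big.NewInt(100))) // true: lead 1900 > threshold 900
}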
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
// syncHeaders attempts to sync headers from the peer. This method may fail
// because we and the peer have conflicting pruning points. In that case we
// return (false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) (bool, error) {
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return false, err
}
if !highestSharedBlockFound {
return false, nil
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
if err != nil {
return false, err
}
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
successString := "successfully"
if !isFinishedSuccessfully {
successString = "(interrupted)"
// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
if err != nil {
return false, err
}
log.Infof("IBD finished %s", successString)
if !highHashBlockInfo.Exists {
return false, protocolerrors.Errorf(true, "did not receive "+
"highHash header %s from peer %s during header download", highHash, flow.peer)
}
log.Debugf("Headers downloaded from peer %s", flow.peer)
return true, nil
}
// findHighestSharedBlockHash attempts to find the highest shared block between the peer
// and this node. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) findHighestSharedBlockHash(
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
@@ -218,7 +137,7 @@ func (flow *handleIBDFlow) findHighestSharedBlockHash(
}
}
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
@@ -236,7 +155,7 @@ func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.Domai
return blockLocator, nil
}
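
CreateHeadersSelectedChainBlockLocator returns hashes along the selected chain; locators of this kind are typically spaced with exponentially growing gaps, which is what lets findHighestSharedBlockHash converge in a logarithmic number of round trips. A hypothetical index-only sketch of such spacing (the real locator carries block hashes, not heights):

package main

import "fmt"

// locatorHeights lists chain heights from high down to low with
// exponentially growing steps: high, high-1, high-2, high-4, ...
func locatorHeights(low, high uint64) []uint64 {
	heights := []uint64{}
	step := uint64(1)
	current := high
	for current > low {
		heights = append(heights, current)
		if current < low+step {
			break
		}
		current -= step
		step *= 2
	}
	return append(heights, low)
}

func main() {
	fmt.Println(locatorHeights(0, 100)) // [100 99 97 93 85 69 37 0]
}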
func (flow *handleIBDFlow) findHighestHashIndex(
func (flow *handleRelayInvsFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
@@ -261,7 +180,7 @@ func (flow *handleIBDFlow) findHighestHashIndex(
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because we and the peer have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
func (flow *handleRelayInvsFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
@@ -269,7 +188,7 @@ func (flow *handleIBDFlow) fetchHighestHash(
if err != nil {
return nil, false, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
@@ -289,28 +208,20 @@ func (flow *handleIBDFlow) fetchHighestHash(
}
}
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
log.Infof("Downloading headers from %s", flow.peer)
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash) error {
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
// Keep a short queue of BlockHeadersMessages so that there's
// Keep a short queue of blockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
spawn("handleRelayInvsFlow-downloadHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
@@ -334,43 +245,31 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
for {
select {
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
case blockHeadersMessage, ok := <-blockHeadersMessageChan:
if !ok {
// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashBlockInfo.Exists {
return protocolerrors.Errorf(true, "did not receive "+
"highHash block %s from peer %s during block download", highHash, flow.peer)
}
return nil
}
for _, header := range ibdBlocksMessage.BlockHeaders {
_, err := flow.processHeader(consensus, header)
for _, header := range blockHeadersMessage.BlockHeaders {
err = flow.processHeader(header)
if err != nil {
return err
}
}
lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
case err := <-errChan:
return err
}
}
}
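
The buffered channel of size 2 above is a small pipeline: one goroutine keeps receiving header batches from the network while the main loop validates and inserts them, so neither side idles. The same producer/consumer shape in isolation, with printed stand-ins for the receive and process steps:

package main

import "fmt"

func main() {
	// A short buffered queue lets the receiver stay one batch ahead of
	// validation, as the size-2 channel does in the flow above.
	batches := make(chan []string, 2)
	errChan := make(chan error)

	// Producer: stands in for receiveHeaders reading from the network.
	go func() {
		for i := 0; i < 3; i++ {
			batches <- []string{fmt.Sprintf("header-%d", i)}
		}
		close(batches) // corresponds to receiving DoneHeaders
	}()

	// Consumer: stands in for processHeader validating and inserting.
	for {
		select {
		case batch, ok := <-batches:
			if !ok {
				fmt.Println("all headers processed")
				return
			}
			for _, header := range batch {
				fmt.Println("validating and inserting", header)
			}
		case err := <-errChan:
			fmt.Println("receive error:", err)
			return
		}
	}
}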
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
peerSelectedTipHash *externalapi.DomainHash) error {
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
func (flow *handleRelayInvsFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneIBD bool, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
@@ -382,14 +281,11 @@ func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeader
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdBlockHeaders,
appmessage.CmdDoneHeaders,
message.Command())
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
@@ -397,66 +293,154 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
if err != nil {
return false, err
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return false, nil
return nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return true, nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
if err != nil {
return err
}
headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
if err != nil {
return err
}
headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()
currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return err
}
currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
if err != nil {
return err
}
currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()
if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
"tip is smaller than the current selected tip")
}
minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
return protocolerrors.Errorf(false, "difference between the timestamps of "+
"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
}
return nil
}
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet() (bool, error) {
log.Debugf("Checking if a new pruning point is available")
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointHashMessage())
if err != nil {
return false, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
msgPruningPointHash, ok := message.(*appmessage.MsgPruningPointHashMessage)
if !ok {
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointHash, message.Command())
}
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
if !blockInfo.Exists {
return false, errors.Errorf("The pruning point header is missing")
}
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
log.Debugf("Already has the block data of the new suggested pruning point %s", msgPruningPointHash.Hash)
return true, nil
}
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgPruningPointHash.Hash)
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
if !isValid {
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
" peer", msgPruningPointHash.Hash)
return false, nil
}
log.Info("Fetching the pruning point UTXO set")
succeed, err := flow.fetchMissingUTXOSet(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
if !succeed {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().Consensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash))
if err != nil {
return false, err
}
block, err := flow.receivePruningPointBlock()
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().Consensus().ValidateAndInsertImportedPruningPoint(block)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return false, err
}
return true, nil
}
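
fetchMissingUTXOSet stages imported pruning-point data and clears it in a defer no matter how the function exits, so a mid-transfer failure cannot leave half-imported state behind; panicking when the cleanup itself fails is a deliberate fail-fast choice. A compact sketch of that shape with hypothetical stage/clear callbacks:

package main

import (
	"errors"
	"fmt"
)

// importWithCleanup stages data and always clears it afterwards; when the
// cleanup itself fails the node's state is unknown, so it fails fast.
// stage and clear are hypothetical callbacks, not kaspad APIs.
func importWithCleanup(stage func() error, clear func() error) (succeed bool, err error) {
	defer func() {
		if clearErr := clear(); clearErr != nil {
			panic(fmt.Sprintf("failed to clear staged data: %s", clearErr))
		}
	}()
	if err := stage(); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	ok, err := importWithCleanup(
		func() error { return errors.New("peer disconnected mid-transfer") },
		func() error { return nil })
	fmt.Println(ok, err) // false peer disconnected mid-transfer
}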
func (flow *handleRelayInvsFlow) receivePruningPointBlock() (*externalapi.DomainBlock, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receivePruningPointBlock")
defer onEnd()
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
ibdBlockMessage, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(ibdBlockMessage.MsgBlock)
log.Debugf("Received pruning point block %s", consensushashing.BlockHash(block))
return block, nil
}
func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
@@ -464,7 +448,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
@@ -475,7 +459,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
err := flow.Domain().Consensus().AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
@@ -510,7 +494,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
}
}
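
The receiving side mirrors the sender's flow control: it consumes chunk messages, appends their outpoint/UTXO pairs, and, in the portion elided from this hunk, requests the next batch every ibdBatchSize chunks until the done message arrives. A standalone sketch of that consumer loop under those assumptions:

package main

import "fmt"

// chunk is a stand-in for MsgPruningPointUTXOSetChunk; done marks the
// MsgDonePruningPointUTXOSetChunks message.
type chunk struct {
	utxos []string
	done  bool
}

const ibdBatchSize = 100 // illustrative value; kaspad defines its own

// receiveChunks consumes chunks, counting UTXOs and requesting the next
// batch every ibdBatchSize chunks.
func receiveChunks(next func() chunk, requestMore func()) int {
	receivedUTXOCount := 0
	receivedChunkCount := 0
	for {
		c := next()
		if c.done {
			fmt.Printf("received %d UTXOs in %d chunks\n", receivedUTXOCount, receivedChunkCount)
			return receivedUTXOCount
		}
		receivedUTXOCount += len(c.utxos)
		receivedChunkCount++
		if receivedChunkCount%ibdBatchSize == 0 {
			requestMore() // enqueue MsgRequestNextPruningPointUTXOSetChunk
		}
	}
}

func main() {
	feed := []chunk{{utxos: []string{"a", "b"}}, {utxos: []string{"c"}}, {done: true}}
	i := 0
	receiveChunks(func() chunk { c := feed[i]; i++; return c }, func() {})
}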
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
@@ -523,17 +507,6 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
return nil
}
lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
if err != nil {
return err
}
highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
if err != nil {
return err
}
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
highestProcessedDAAScore := lowBlockHeader.DAAScore()
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
@@ -548,7 +521,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
}
for _, expectedHash := range hashesToRequest {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return err
}
@@ -570,7 +543,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
return err
}
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@@ -578,62 +551,27 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, virtualChangeSet)
err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
highestProcessedDAAScore = block.Header.DAAScore()
}
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
}
return flow.resolveVirtual(highestProcessedDAAScore)
}
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}
return nil
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int
if estimatedVirtualDAAScoreTarget <= virtualDAAScoreStart { // avoid uint64 underflow in the subtraction below
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
// IBD. Inv messages are expected to arrive at any given moment, but should be
// ignored while we're in IBD
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
if err != nil {
return err
return nil, err
}
err = flow.OnVirtualChange(virtualChangeSet)
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
return message, nil
}
}
}
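
dequeueIncomingMessageAndSkipInvs is a filtering dequeue: block-inv announcements may land on this route at any moment during IBD, and the loop silently drops them while passing every other message through. The same filter in isolation, with stand-in types instead of the kaspad router API:

package main

import "fmt"

type message interface{ command() string }

type invMessage struct{} // stand-in for MsgInvRelayBlock

func (invMessage) command() string { return "inv" }

type otherMessage struct{ name string }

func (m otherMessage) command() string { return m.name }

// dequeueSkippingInvs drains the queue until a non-inv message appears,
// dropping invs exactly as the loop above does during IBD.
func dequeueSkippingInvs(dequeue func() message) message {
	for {
		msg := dequeue()
		if _, ok := msg.(invMessage); !ok {
			return msg
		}
	}
}

func main() {
	queue := []message{invMessage{}, invMessage{}, otherMessage{name: "headers"}}
	i := 0
	next := func() message { m := queue[i]; i++; return m }
	fmt.Println(dequeueSkippingInvs(next).command()) // headers
}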


@@ -4,14 +4,12 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
Domain() domain.Domain
Config() *config.Config
}
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
@@ -23,11 +21,6 @@ func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
return err
}
if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
return nil
}
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)


@@ -1,4 +1,4 @@
package flowcontext
package blockrelay
import (
"sync"
@@ -13,15 +13,13 @@ type SharedRequestedBlocks struct {
sync.Mutex
}
// Remove removes a block from the set.
func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) {
func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
s.Lock()
defer s.Unlock()
delete(s.blocks, *hash)
}
// RemoveSet removes a set of blocks from the set.
func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) {
func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
s.Lock()
defer s.Unlock()
for hash := range blockHashes {
@@ -29,8 +27,7 @@ func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash
}
}
// AddIfNotExists adds a block to the set if it doesn't exist yet.
func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) {
func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.blocks[*hash]


@@ -28,7 +28,7 @@ type HandleHandshakeContext interface {
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
}
// HandleHandshake sets up the new_handshake protocol - It sends a version message and waits for an incoming
// HandleHandshake sets up the handshake protocol - It sends a version message and waits for an incoming
// version message, as well as a verack for the sent version
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
@@ -98,7 +98,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
}
// Handshake is different from other flows, since it should forward router.ErrRouteClosed to errChan
// Therefore we implement a separate handleError for new_handshake
// Therefore we implement a separate handleError for handshake
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
if errors.Is(err, routerpkg.ErrRouteClosed) {
if atomic.AddUint32(isStopping, 1) == 1 {


@@ -7,7 +7,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
var (
@@ -18,9 +17,7 @@ var (
// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
minAcceptableProtocolVersion = uint32(4)
maxAcceptableProtocolVersion = uint32(4)
minAcceptableProtocolVersion = appmessage.ProtocolVersion
)
type receiveVersionFlow struct {
@@ -100,12 +97,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
return nil, protocolerrors.New(false, "incompatible subnetworks")
}
if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion {
return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion)
}
maxProtocolVersion := flow.Config().ProtocolVersion
flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion)
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
if err != nil {
return nil, err


@@ -7,7 +7,6 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
)
var (
@@ -57,18 +56,15 @@ func (flow *sendVersionFlow) start() error {
// Version message.
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
subnetworkID := flow.Config().SubnetworkID
if flow.Config().ProtocolVersion < minAcceptableProtocolVersion {
return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion)
}
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion)
flow.Config().ActiveNetParams.Name, subnetworkID)
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
// Advertise the services flag
msg.Services = defaultServices
// Advertise our max supported protocol version.
msg.ProtocolVersion = flow.Config().ProtocolVersion
msg.ProtocolVersion = appmessage.ProtocolVersion
// Advertise if inv messages for transactions are desired.
msg.DisableRelayTx = flow.Config().BlocksOnly


@@ -1,9 +0,0 @@
package ready
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)


@@ -1,56 +0,0 @@
package ready
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"sync/atomic"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleReady notifies the other peer that this peer is ready for messages, and waits for the other peer
// to send a ready message before the flows start running.
func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
peer *peerpkg.Peer,
) error {
log.Debugf("Sending ready message to %s", peer)
isStopping := uint32(0)
err := outgoingRoute.Enqueue(appmessage.NewMsgReady())
if err != nil {
return handleError(err, "HandleReady", &isStopping)
}
_, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return handleError(err, "HandleReady", &isStopping)
}
log.Debugf("Got ready message from %s", peer)
return nil
}
// Ready is different from other flows, since it should forward router.ErrRouteClosed to errChan
// Therefore we implement a separate handleError for 'ready'
func handleError(err error, flowName string, isStopping *uint32) error {
if errors.Is(err, routerpkg.ErrRouteClosed) {
if atomic.AddUint32(isStopping, 1) == 1 {
return err
}
return nil
}
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
log.Errorf("Ready protocol error from %s: %s", flowName, err)
if atomic.AddUint32(isStopping, 1) == 1 {
return err
}
return nil
}
panic(err)
}


@@ -33,5 +33,10 @@ func (flow *handleRejectsFlow) start() error {
}
rejectMessage := message.(*appmessage.MsgReject)
const maxReasonLength = 255
if len(rejectMessage.Reason) > maxReasonLength {
return protocolerrors.Errorf(false, "got reject message longer than %d", maxReasonLength)
}
return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
}

File diff suppressed because it is too large


@@ -1,11 +1,11 @@
package testing
import (
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"testing"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
@@ -21,8 +21,8 @@ func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressMan
func TestReceiveAddressesErrors(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
incomingRoute := router.NewRoute()
outgoingRoute := router.NewRoute()
peer := peerpkg.New(nil)
errChan := make(chan error)
go func() {


@@ -3,7 +3,6 @@ package transactionrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -19,10 +18,9 @@ import (
type TransactionsRelayContext interface {
NetAdapter() *netadapter.NetAdapter
Domain() domain.Domain
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
SharedRequestedTransactions() *SharedRequestedTransactions
Broadcast(message appmessage.Message) error
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
@@ -50,10 +48,6 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err
}
if flow.IsIBDRunning() {
continue
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err
@@ -74,7 +68,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
if flow.isKnownTransaction(txID) {
continue
}
exists := flow.SharedRequestedTransactions().AddIfNotExists(txID)
exists := flow.SharedRequestedTransactions().addIfNotExists(txID)
if exists {
continue
}
@@ -88,7 +82,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
err = flow.outgoingRoute.Enqueue(msgGetTransactions)
if err != nil {
flow.SharedRequestedTransactions().RemoveMany(idsToRequest)
flow.SharedRequestedTransactions().removeMany(idsToRequest)
return nil, err
}
return idsToRequest, nil
@@ -125,7 +119,8 @@ func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransact
}
func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error {
return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs)
inv := appmessage.NewMsgInvTransaction(acceptedTxIDs)
return flow.Broadcast(inv)
}
// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute,
@@ -157,7 +152,7 @@ func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error {
// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
// clean from any pending transactions.
defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions)
defer flow.SharedRequestedTransactions().removeMany(requestedTransactions)
for _, expectedID := range requestedTransactions {
msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
if err != nil {
@@ -178,18 +173,17 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
expectedID, txID)
}
acceptedTransactions, err :=
flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true)
err = flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, true)
if err != nil {
ruleErr := &mempool.RuleError{}
if !errors.As(err, ruleErr) {
return errors.Wrapf(err, "failed to process transaction %s", txID)
}
shouldBan := false
shouldBan := true
if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
if txRuleErr.RejectCode == mempool.RejectInvalid {
shouldBan = true
if txRuleErr.RejectCode != mempool.RejectInvalid {
shouldBan = false
}
}
@@ -199,7 +193,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
}
err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions))
err = flow.broadcastAcceptedTransactions([]*externalapi.DomainTransactionID{txID})
if err != nil {
return err
}


@@ -1,4 +1,4 @@
package flowcontext
package transactionrelay
import (
"sync"
@@ -13,15 +13,13 @@ type SharedRequestedTransactions struct {
sync.Mutex
}
// Remove removes a transaction from the set.
func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) {
func (s *SharedRequestedTransactions) remove(txID *externalapi.DomainTransactionID) {
s.Lock()
defer s.Unlock()
delete(s.transactions, *txID)
}
// RemoveMany removes a set of transactions from the set.
func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) {
func (s *SharedRequestedTransactions) removeMany(txIDs []*externalapi.DomainTransactionID) {
s.Lock()
defer s.Unlock()
for _, txID := range txIDs {
@@ -29,8 +27,7 @@ func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTran
}
}
// AddIfNotExists adds a transaction to the set if it doesn't exist yet.
func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
func (s *SharedRequestedTransactions) addIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.transactions[*txID]


@@ -1,33 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlockLocator:
return message.BlockLocatorHashes, nil
default:
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
}
}


@@ -1,145 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
Config() *config.Config
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)
ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
for _, blockHash := range pointAndItsAnticone {
blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
if err != nil {
return err
}
trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
for i, daaBlockHash := range blockDAAWindowHashes {
index, exists := daaWindowHashesToIndex[*daaBlockHash]
if !exists {
trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
if err != nil {
return err
}
daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
index = len(daaWindowBlocks) - 1
daaWindowHashesToIndex[*daaBlockHash] = index
}
trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
}
ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
if err != nil {
return err
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
if !exists {
data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
if err != nil {
return err
}
ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
Hash: ghostdagDataBlockHash,
GHOSTDAGData: data,
})
index = len(ghostdagData) - 1
ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
}
trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
}
}
err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
if err != nil {
return err
}
for _, blockHash := range pointAndItsAnticone {
block, err := context.Domain().Consensus().GetBlock(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
}
}
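
The isBusy flag is used as a non-blocking mutex: atomic.CompareAndSwapUint32 lets exactly one goroutine flip it from 0 to 1, so concurrent pruning-point-anticone requests fail fast instead of queueing behind an expensive computation. The guard in isolation:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var isBusy uint32

// serve runs work only if no other request is in flight; otherwise it
// returns immediately, matching the CompareAndSwap guard above.
func serve(work func()) error {
	if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
		return errors.New("node is busy with other pruning point anticone requests")
	}
	defer atomic.StoreUint32(&isBusy, 0)
	work()
	return nil
}

func main() {
	fmt.Println(serve(func() { fmt.Println("building anticone response") })) // <nil>
}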


@@ -1,40 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
Domain() domain.Domain
}
// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point proof from %s", peer)
pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
if err != nil {
return err
}
pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
err = outgoingRoute.Enqueue(pruningPointProofMessage)
if err != nil {
return err
}
log.Debugf("Sent pruning point proof to %s", peer)
}
}


@@ -1,140 +0,0 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
Domain() domain.Domain
}
type handleRequestPruningPointUTXOSetFlow struct {
HandleRequestPruningPointUTXOSetContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestPruningPointUTXOSetFlow{
HandleRequestPruningPointUTXOSetContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
for {
msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
if err != nil {
return err
}
err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
if err != nil {
return err
}
}
}
func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
defer onEnd()
log.Debugf("Got request for pruning point UTXO set")
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}
func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
*appmessage.MsgRequestPruningPointUTXOSet, error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
}
func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {
// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
// Any other retrieval error must abort the flow rather than fall through.
return err
}
log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)
outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}


@@ -1,32 +0,0 @@
package blockrelay
type ibdProgressReporter struct {
lowDAAScore uint64
highDAAScore uint64
objectName string
totalDAAScoreDifference uint64
lastReportedProgressPercent int
processed int
}
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
objectName: objectName,
totalDAAScoreDifference: highDAAScore - lowDAAScore,
lastReportedProgressPercent: 0,
processed: 0,
}
}
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta
relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
ipr.lastReportedProgressPercent = progressPercent
}
}
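
The reporter maps absolute DAA scores onto a 0-100 scale and logs only when the integer percent advances, which caps output at roughly a hundred lines per sync phase regardless of how many objects are processed. A self-contained sketch of that throttling logic:

package main

import "fmt"

func main() {
	// Mirror reportProgress: convert DAA scores to percents and log only
	// when the integer percent advances.
	lowDAAScore, highDAAScore := uint64(1000), uint64(2000)
	total := highDAAScore - lowDAAScore
	lastReported := 0
	for score := lowDAAScore; score <= highDAAScore; score += 73 {
		percent := int(float64(score-lowDAAScore) / float64(total) * 100)
		if percent > lastReported {
			fmt.Printf("IBD: processed up to DAA score %d (%d%%)\n", score, percent)
			lastReported = percent
		}
	}
}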


@@ -1,390 +0,0 @@
package blockrelay
import (
"fmt"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
"time"
)
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
err := flow.Domain().InitStagingConsensus()
if err != nil {
return err
}
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
if err != nil {
if !flow.IsRecoverableError(err) {
return err
}
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
if deleteStagingConsensusErr != nil {
return deleteStagingConsensusErr
}
return err
}
err = flow.Domain().CommitStagingConsensus()
if err != nil {
return err
}
err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
if !highestSharedBlockFound {
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
if err != nil {
return false, false, err
}
if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
return true, true, nil
}
return false, false, nil
}
return false, true, nil
}
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
if err != nil {
return false, err
}
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
if err != nil {
return false, err
}
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
return false, nil
}
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
log.Infof("Downloading the pruning point proof from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
if err != nil {
return nil, err
}
message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
if err != nil {
return nil, err
}
pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
}
pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
}
return nil, err
}
err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
if err != nil {
return nil, err
}
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
if err != nil {
return err
}
err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
if err != nil {
return err
}
// TODO: Remove this condition once there's more proper way to check finality violation
// in the headers proof.
if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
}
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
if err != nil {
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.Exists {
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
}
err = flow.validatePruningPointFutureHeaderTimestamps()
if err != nil {
return err
}
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
return nil
}
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
if err != nil {
return err
}
err = flow.validateAndInsertPruningPoints(proofPruningPoint)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgTrustedData, ok := message.(*appmessage.MsgTrustedData)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdTrustedData, message.Command())
}
pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
}
if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData)
if err != nil {
return err
}
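// The rest of the stream is the pruning point's anticone: keep processing
// blocks with trusted data until the peer signals it's done.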
for {
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
if err != nil {
return err
}
if done {
break
}
err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData)
if err != nil {
return err
}
}
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
return nil
}
func (flow *handleIBDFlow) processBlockWithTrustedData(
consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error {
blockWithTrustedData := &externalapi.BlockWithTrustedData{
Block: appmessage.MsgBlockToDomainBlock(block.Block),
DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)),
GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)),
}
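// DAA window headers and GHOSTDAG data are deduplicated on the wire: each block
// carries indices into the shared MsgTrustedData arrays rather than full copies.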
for _, index := range block.DAAWindowIndices {
blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index]))
}
for _, index := range block.GHOSTDAGDataIndices {
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
return err
}
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch downCastedMessage := message.(type) {
case *appmessage.MsgBlockWithTrustedDataV4:
return downCastedMessage, false, nil
case *appmessage.MsgDoneBlocksWithTrustedData:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
(&appmessage.MsgBlockWithTrustedDataV4{}).Command(),
(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
downCastedMessage.Command())
}
}
func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
}
return msgPruningPoints, nil
}
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
if currentPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
}
pruningPoints, err := flow.receivePruningPoints()
if err != nil {
return err
}
headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
for i, header := range pruningPoints.Headers {
headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
}
arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
if err != nil {
return err
}
if arePruningPointsViolatingFinality {
// TODO: Find a better way to deal with finality conflicts.
return protocolerrors.Errorf(false, "pruning points are violating finality")
}
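// The last header in the received list is expected to be the proof's pruning point itself.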
lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
if !lastPruningPoint.Equal(proofPruningPoint) {
return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
"point in the list")
}
err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
if err != nil {
return err
}
return nil
}
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
pruningPoint *externalapi.DomainHash) (bool, error) {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
if err != nil {
return false, err
}
if !isValid {
return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
}
log.Info("Fetching the pruning point UTXO set")
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
if err != nil {
return false, err
}
if !isSuccessful {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
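// Whatever happens below, clear any partially imported pruning point data
// so the staging consensus is left clean.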
defer func() {
err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
return true, nil
}
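Note: receiveAndInsertPruningPointUTXOSet, called above, is not included in this diff. Below is a minimal sketch of the chunk-receiving loop it stands for, assembled only from message commands that do appear in this diff (CmdPruningPointUTXOSetChunk, CmdDonePruningPointUTXOSetChunks, CmdUnexpectedPruningPoint, CmdRequestNextPruningPointUTXOSetChunk). The message struct fields, the NewMsgRequestNextPruningPointUTXOSetChunk constructor, and the AppendImportedPruningPointUTXOs staging call are assumptions, and the real flow may request chunks in batches rather than one at a time.
// Sketch only: the real receiveAndInsertPruningPointUTXOSet is not part of this diff.
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSetSketch(
consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
// Stage this chunk's UTXOs (AppendImportedPruningPointUTXOs is assumed to be
// the staging API; compare ClearImportedPruningPointData above).
err := consensus.AppendImportedPruningPointUTXOs(message.OutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
// Ask the peer for the next chunk. The real flow may batch these requests.
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointUTXOSetChunk())
if err != nil {
return false, err
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
// The whole UTXO set was received.
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
// The peer no longer agrees on this pruning point; abort without banning.
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, %s or %s, got: %s",
appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks,
appmessage.CmdUnexpectedPruningPoint,
message.Command())
}
}
}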


@@ -1,194 +0,0 @@
package v4
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type protocolManager interface {
RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
Context() *flowcontext.FlowContext
}
// Register registers all the protocol flows with the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
flows = registerAddressFlows(m, router, isStopping, errChan)
flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)
return flows
}
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
}),
m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4,
appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
appmessage.CmdPruningPointProof,
appmessage.CmdTrustedData,
},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBD(m.Context(), incomingRoute,
outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandleIBDBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
m.RegisterFlow("HandlePruningPointProofRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
},
),
}
}
func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
m.RegisterFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}
func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
outgoingRoute := router.OutgoingRoute()
return []*common.Flow{
m.RegisterFlow("HandleRejects", router,
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
},
),
}
}


@@ -1,196 +0,0 @@
package transactionrelay_test
import (
"errors"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"strings"
"testing"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type mocTransactionsRelayContext struct {
netAdapter *netadapter.NetAdapter
domain domain.Domain
sharedRequestedTransactions *flowcontext.SharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter {
return m.netAdapter
}
func (m *mocTransactionsRelayContext) Domain() domain.Domain {
return m.domain
}
func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions {
return m.sharedRequestedTransactions
}
func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error {
return nil
}
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
defer incomingRoute.Close()
peerIncomingRoute := router.NewRoute("outgoing")
defer peerIncomingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
invMessage := appmessage.NewMsgInvTransaction(txIDs)
err = incomingRoute.Enqueue(invMessage)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsRequest", func() {
msg, err := peerIncomingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %v", err)
}
inv := msg.(*appmessage.MsgRequestTransactions)
if len(txIDs) != len(inv.IDs) {
t.Fatalf("TestHandleRelayedTransactions: expected %d transactions ID, but got %d", len(txIDs), len(inv.IDs))
}
for i, id := range inv.IDs {
if txIDs[i].String() != id.String() {
t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
}
err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i]))
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
}
// Insert an unexpected message type to stop the infinite loop.
err = incomingRoute.Enqueue(&appmessage.MsgAddresses{})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
})
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute)
// Since we inserted an unexpected message type to stop the infinite loop,
// we expect the error to originate from this specific message and to
// be counted as a protocol error.
if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) {
t.Fatalf("Expected to protocol error")
} else {
if !protocolErr.ShouldBan {
t.Fatalf("Exepcted shouldBan true, but got false.")
}
if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") {
t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+
"in the block relay flow, but got: %v", protocolErr.Cause)
}
}
})
}
// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when
// trying to dequeue a message from a closed route.
func TestOnClosedIncomingRoute(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute")
if err != nil {
t.Fatalf("Error setting up test consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to creat a NetAdapter : %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txIDs := []*externalapi.DomainTransactionID{txID}
err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs})
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
incomingRoute.Close()
err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute)
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}


@@ -1,91 +0,0 @@
package transactionrelay_test
import (
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
"testing"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions
// when the requested transactions are not found in the mempool.
func TestHandleRequestedTransactionsNotFound(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound")
if err != nil {
t.Fatalf("Error setting up test Consensus: %+v", err)
}
defer teardown(false)
sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions()
adapter, err := netadapter.NewNetAdapter(config.DefaultConfig())
if err != nil {
t.Fatalf("Failed to create a NetAdapter: %v", err)
}
domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database())
if err != nil {
t.Fatalf("Failed to set up a domain Instance: %v", err)
}
context := &mocTransactionsRelayContext{
netAdapter: adapter,
domain: domainInstance,
sharedRequestedTransactions: sharedRequestedTransactions,
}
incomingRoute := router.NewRoute("incoming")
outgoingRoute := router.NewRoute("outgoing")
defer outgoingRoute.Close()
txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})
txIDs := []*externalapi.DomainTransactionID{txID1, txID2}
msg := appmessage.NewMsgRequestTransactions(txIDs)
err = incomingRoute.Enqueue(msg)
if err != nil {
t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err)
}
// This goroutine represents the peer's actions.
spawn("peerResponseToTheTransactionsMessages", func() {
for i, id := range txIDs {
msg, err := outgoingRoute.Dequeue()
if err != nil {
t.Fatalf("Dequeue: %s", err)
}
outMsg := msg.(*appmessage.MsgTransactionNotFound)
if txIDs[i].String() != outMsg.ID.String() {
t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String())
}
}
// Close the incomingRoute to stop the infinite loop.
incomingRoute.Close()
})
err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute)
// Make sure the error is due to the closed route.
if err == nil || !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err)
}
})
}


@@ -2,12 +2,10 @@ package protocol
import (
"fmt"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/pkg/errors"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -63,8 +61,8 @@ func (m *Manager) IBDPeer() *peerpkg.Peer {
}
// AddTransaction adds transaction to the mempool and propagates it.
func (m *Manager) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error {
return m.context.AddTransaction(tx, allowOrphan)
func (m *Manager) AddTransaction(tx *externalapi.DomainTransaction) error {
return m.context.AddTransaction(tx)
}
// AddBlock adds the given block to the DAG and propagates it.
@@ -72,16 +70,11 @@ func (m *Manager) AddBlock(block *externalapi.DomainBlock) error {
return m.context.AddBlock(block)
}
// Context returns the manager's flow context
func (m *Manager) Context() *flowcontext.FlowContext {
return m.context
}
func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
flowsWaitGroup.Add(len(flows))
for _, flow := range flows {
executeFunc := flow.ExecuteFunc // extract to new variable so that it's not overwritten
spawn(fmt.Sprintf("flow-%s", flow.Name), func() {
executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
spawn(fmt.Sprintf("flow-%s", flow.name), func() {
executeFunc(peer)
flowsWaitGroup.Done()
})
@@ -90,11 +83,6 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
return <-errChan
}
// SetOnVirtualChange sets the onVirtualChangeHandler handler
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
}
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)


@@ -13,6 +13,10 @@ import (
"github.com/kaspanet/kaspad/util/mstime"
)
// maxProtocolVersion is the maximum protocol version
// that this kaspad node supports
const maxProtocolVersion = 1
// Peer holds data about a peer.
type Peer struct {
connection *netadapter.NetConnection
@@ -31,8 +35,6 @@ type Peer struct {
lastPingNonce uint64 // The nonce of the last ping we sent
lastPingTime time.Time // Time we sent last ping
lastPingDuration time.Duration // Time for last ping to return
ibdRequestChannel chan *externalapi.DomainBlock // A channel used to communicate IBD requests between flows
}
// New returns a new Peer
@@ -40,7 +42,6 @@ func New(connection *netadapter.NetConnection) *Peer {
return &Peer{
connection: connection,
connectionStarted: time.Now(),
ibdRequestChannel: make(chan *externalapi.DomainBlock),
}
}
@@ -75,11 +76,6 @@ func (p *Peer) AdvertisedProtocolVersion() uint32 {
return p.advertisedProtocolVerion
}
// ProtocolVersion returns the protocol version which is used when communicating with the peer.
func (p *Peer) ProtocolVersion() uint32 {
return p.protocolVersion
}
// TimeConnected returns the time since the connection to this peer was started.
func (p *Peer) TimeConnected() time.Duration {
return time.Since(p.connectionStarted)
@@ -91,7 +87,7 @@ func (p *Peer) IsOutbound() bool {
}
// UpdateFieldsFromMsgVersion updates the peer with the data from the version message.
func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion, maxProtocolVersion uint32) {
func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion) {
// Negotiate the protocol version.
p.advertisedProtocolVerion = msg.ProtocolVersion
p.protocolVersion = mathUtil.MinUint32(maxProtocolVersion, p.advertisedProtocolVerion)
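// Use the lower of this node's maximum supported version and the peer's
// advertised version, so both sides speak a version they both understand.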
@@ -146,8 +142,3 @@ func (p *Peer) LastPingDuration() time.Duration {
return p.lastPingDuration
}
// IBDRequestChannel returns the channel used in order to communicate an IBD request between peer flows
func (p *Peer) IBDRequestChannel() chan *externalapi.DomainBlock {
return p.ibdRequestChannel
}


@@ -1,23 +1,33 @@
package protocol
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
"github.com/kaspanet/kaspad/app/protocol/flows/rejects"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/handshake"
"github.com/kaspanet/kaspad/app/protocol/flows/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
type flowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error
type flowExecuteFunc func(peer *peerpkg.Peer)
type flow struct {
name string
executeFunc flowExecuteFunc
}
func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *netadapter.NetConnection) {
// isStopping flag is raised the moment that the connection associated with this router is disconnected
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
@@ -25,7 +35,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
isStopping := uint32(0)
errChan := make(chan error)
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
flows := m.registerFlows(router, errChan, &isStopping)
receiveVersionRoute, sendVersionRoute := registerHandshakeRoutes(router)
// After the flows were registered, spawn a new goroutine that will wait for the connection to finish initializing
// and start receiving messages
@@ -73,21 +84,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
}
defer m.context.RemoveFromPeers(peer)
var flows []*common.Flow
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
switch peer.ProtocolVersion() {
case 4:
flows = v4.Register(m, router, errChan, &isStopping)
default:
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
}
err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
if err != nil {
m.handleError(err, netConnection, router.OutgoingRoute())
return
}
removeHandshakeRoutes(router)
flowsWaitGroup := &sync.WaitGroup{}
@@ -106,11 +102,11 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if m.context.Config().EnableBanning && protocolErr.ShouldBan {
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
err := m.context.ConnectionManager().Ban(netConnection)
if err != nil && !errors.Is(err, connmanager.ErrCannotBanPermanent) {
if !errors.Is(err, connmanager.ErrCannotBanPermanent) {
panic(err)
}
@@ -134,11 +130,159 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
panic(err)
}
// RegisterFlow registers a flow to the given router.
func (m *Manager) RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
func (m *Manager) registerFlows(router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*flow) {
flows = m.registerAddressFlows(router, isStopping, errChan)
flows = append(flows, m.registerBlockRelayFlows(router, isStopping, errChan)...)
flows = append(flows, m.registerPingFlows(router, isStopping, errChan)...)
flows = append(flows, m.registerTransactionRelayFlow(router, isStopping, errChan)...)
flows = append(flows, m.registerRejectsFlow(router, isStopping, errChan)...)
route, err := router.AddIncomingRoute(name, messageTypes)
return flows
}
func (m *Manager) registerAddressFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()
return []*flow{
m.registerFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.SendAddresses(m.context, incomingRoute, outgoingRoute)
},
),
m.registerOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return addressexchange.ReceiveAddresses(m.context, incomingRoute, outgoingRoute, peer)
},
),
}
}
func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()
return []*flow{
m.registerOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.context, outgoingRoute, peer)
}),
m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdPruningPointHash, appmessage.CmdIBDBlockLocatorHighestHash,
appmessage.CmdIBDBlockLocatorHighestHashNotFound, appmessage.CmdDonePruningPointUTXOSetChunks},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.context, incomingRoute,
outgoingRoute, peer)
},
),
m.registerFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayBlockRequests(m.context, incomingRoute, outgoingRoute, peer)
},
),
m.registerFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.context, incomingRoute, outgoingRoute, peer)
},
),
m.registerFlow("HandleRequestPruningPointUTXOSetAndBlock", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSetAndBlock,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestPruningPointUTXOSetAndBlock(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandlePruningPointHashRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointHash}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandlePruningPointHashRequests(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandleIBDBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockLocator(m.context, incomingRoute, outgoingRoute, peer)
},
),
}
}
func (m *Manager) registerPingFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()
return []*flow{
m.registerFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.ReceivePings(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ping.SendPings(m.context, incomingRoute, outgoingRoute, peer)
},
),
}
}
func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()
return []*flow{
m.registerFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.context, incomingRoute, outgoingRoute)
},
),
}
}
func (m *Manager) registerRejectsFlow(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()
return []*flow{
m.registerFlow("HandleRejects", router,
[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return rejects.HandleRejects(m.context, incomingRoute, outgoingRoute)
},
),
}
}
func (m *Manager) registerFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc flowInitializeFunc) *flow {
route, err := router.AddIncomingRoute(messageTypes)
if err != nil {
panic(err)
}
@@ -146,12 +290,11 @@ func (m *Manager) RegisterFlow(name string, router *routerpkg.Router, messageTyp
return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
}
// RegisterFlowWithCapacity registers a flow to the given router with a custom capacity.
func (m *Manager) RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
func (m *Manager) registerFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
errChan chan error, initializeFunc flowInitializeFunc) *flow {
route, err := router.AddIncomingRouteWithCapacity(name, capacity, messageTypes)
route, err := router.AddIncomingRouteWithCapacity(capacity, messageTypes)
if err != nil {
panic(err)
}
@@ -160,11 +303,11 @@ func (m *Manager) RegisterFlowWithCapacity(name string, capacity int, router *ro
}
func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32,
errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
errChan chan error, initializeFunc flowInitializeFunc) *flow {
return &common.Flow{
Name: name,
ExecuteFunc: func(peer *peerpkg.Peer) {
return &flow{
name: name,
executeFunc: func(peer *peerpkg.Peer) {
err := initializeFunc(route, peer)
if err != nil {
m.context.HandleError(err, name, isStopping, errChan)
@@ -174,18 +317,17 @@ func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isSt
}
}
// RegisterOneTimeFlow registers a one-time flow (that exits once some operations are done) to the given router.
func (m *Manager) RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow {
func (m *Manager) registerOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
isStopping *uint32, stopChan chan error, initializeFunc flowInitializeFunc) *flow {
route, err := router.AddIncomingRoute(name, messageTypes)
route, err := router.AddIncomingRoute(messageTypes)
if err != nil {
panic(err)
}
return &common.Flow{
Name: name,
ExecuteFunc: func(peer *peerpkg.Peer) {
return &flow{
name: name,
executeFunc: func(peer *peerpkg.Peer) {
defer func() {
err := router.RemoveRoute(messageTypes)
if err != nil {
@@ -203,27 +345,22 @@ func (m *Manager) RegisterOneTimeFlow(name string, router *routerpkg.Router, mes
}
func registerHandshakeRoutes(router *routerpkg.Router) (
receiveVersionRoute, sendVersionRoute, receiveReadyRoute *routerpkg.Route) {
receiveVersionRoute, err := router.AddIncomingRoute("recieveVersion - incoming", []appmessage.MessageCommand{appmessage.CmdVersion})
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route) {
receiveVersionRoute, err := router.AddIncomingRoute([]appmessage.MessageCommand{appmessage.CmdVersion})
if err != nil {
panic(err)
}
sendVersionRoute, err = router.AddIncomingRoute("sendVersion - incoming", []appmessage.MessageCommand{appmessage.CmdVerAck})
sendVersionRoute, err = router.AddIncomingRoute([]appmessage.MessageCommand{appmessage.CmdVerAck})
if err != nil {
panic(err)
}
receiveReadyRoute, err = router.AddIncomingRoute("recieveReady - incoming", []appmessage.MessageCommand{appmessage.CmdReady})
if err != nil {
panic(err)
}
return receiveVersionRoute, sendVersionRoute, receiveReadyRoute
return receiveVersionRoute, sendVersionRoute
}
func removeHandshakeRoutes(router *routerpkg.Router) {
err := router.RemoveRoute([]appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck, appmessage.CmdReady})
err := router.RemoveRoute([]appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck})
if err != nil {
panic(err)
}


@@ -48,31 +48,12 @@ func NewManager(
}
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
defer onEnd()
err := m.NotifyVirtualChange(virtualChangeSet)
if err != nil {
return err
}
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
if err != nil {
return err
}
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
defer onEnd()
if m.context.Config.UTXOIndex {
err := m.notifyUTXOsChanged(virtualChangeSet)
err := m.notifyUTXOsChanged(blockInsertionResult)
if err != nil {
return err
}
@@ -88,12 +69,18 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
return err
}
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
err = m.notifyVirtualSelectedParentChainChanged(blockInsertionResult)
if err != nil {
return err
}
return nil
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
if err != nil {
return err
}
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
@@ -130,11 +117,11 @@ func (m *Manager) NotifyFinalityConflictResolved(finalityBlockHash string) error
return m.context.NotificationManager.NotifyFinalityConflictResolved(notification)
}
func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) notifyUTXOsChanged(blockInsertionResult *externalapi.BlockInsertionResult) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyUTXOsChanged")
defer onEnd()
utxoIndexChanges, err := m.context.UTXOIndex.Update(virtualChangeSet)
utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult)
if err != nil {
return err
}
@@ -175,21 +162,21 @@ func (m *Manager) notifyVirtualDaaScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
defer onEnd()
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
virtualInfo, err := m.context.Domain.Consensus().GetVirtualInfo()
if err != nil {
return err
}
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualInfo.DAAScore)
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
}
func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *externalapi.VirtualChangeSet) error {
func (m *Manager) notifyVirtualSelectedParentChainChanged(blockInsertionResult *externalapi.BlockInsertionResult) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
defer onEnd()
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges)
blockInsertionResult.VirtualSelectedParentChainChanges)
if err != nil {
return err
}


@@ -28,7 +28,6 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdGetVirtualSelectedParentChainFromBlockRequestMessage: rpchandlers.HandleGetVirtualSelectedParentChainFromBlock,
appmessage.CmdGetBlocksRequestMessage: rpchandlers.HandleGetBlocks,
appmessage.CmdGetBlockCountRequestMessage: rpchandlers.HandleGetBlockCount,
appmessage.CmdGetBalanceByAddressRequestMessage: rpchandlers.HandleGetBalanceByAddress,
appmessage.CmdGetBlockDAGInfoRequestMessage: rpchandlers.HandleGetBlockDAGInfo,
appmessage.CmdResolveFinalityConflictRequestMessage: rpchandlers.HandleResolveFinalityConflict,
appmessage.CmdNotifyFinalityConflictsRequestMessage: rpchandlers.HandleNotifyFinalityConflicts,
@@ -38,7 +37,6 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
appmessage.CmdGetBalancesByAddressesRequestMessage: rpchandlers.HandleGetBalancesByAddresses,
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
@@ -46,7 +44,6 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdGetInfoRequestMessage: rpchandlers.HandleGetInfo,
appmessage.CmdNotifyPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleNotifyPruningPointUTXOSetOverrideRequest,
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
}
@@ -55,7 +52,7 @@ func (m *Manager) routerInitializer(router *router.Router, netConnection *netada
for messageType := range handlers {
messageTypes = append(messageTypes, messageType)
}
incomingRoute, err := router.AddIncomingRoute("rpc router", messageTypes)
incomingRoute, err := router.AddIncomingRoute(messageTypes)
if err != nil {
panic(err)
}


@@ -3,6 +3,7 @@ package rpccontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
@@ -15,9 +16,29 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
removedChainBlockHashes[i] = removed.String()
}
addedChainBlocks := make([]string, len(selectedParentChainChanges.Added))
addedChainBlocks := make([]*appmessage.ChainBlock, len(selectedParentChainChanges.Added))
for i, added := range selectedParentChainChanges.Added {
addedChainBlocks[i] = added.String()
acceptanceData, err := ctx.Domain.Consensus().GetBlockAcceptanceData(added)
if err != nil {
return nil, err
}
acceptedBlocks := make([]*appmessage.AcceptedBlock, len(acceptanceData))
for j, acceptedBlock := range acceptanceData {
acceptedTransactionIDs := make([]string, len(acceptedBlock.TransactionAcceptanceData))
for k, transaction := range acceptedBlock.TransactionAcceptanceData {
transactionID := consensushashing.TransactionID(transaction.Transaction)
acceptedTransactionIDs[k] = transactionID.String()
}
acceptedBlocks[j] = &appmessage.AcceptedBlock{
Hash: acceptedBlock.BlockHash.String(),
AcceptedTransactionIDs: acceptedTransactionIDs,
}
}
addedChainBlocks[i] = &appmessage.ChainBlock{
Hash: added.String(),
AcceptedBlocks: acceptedBlocks,
}
}
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil


@@ -10,6 +10,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/utils/estimatedsize"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -56,29 +57,21 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
"invalid block")
}
_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
if err != nil {
return err
}
isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
if err != nil {
return err
}
block.VerboseData = &appmessage.RPCBlockVerboseData{
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds),
IsChainBlock: isChainBlock,
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
}
// selectedParentHash will be nil in the genesis block
if blockInfo.SelectedParent != nil {
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
if selectedParentHash != nil {
block.VerboseData.SelectedParentHash = selectedParentHash.String()
}
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
@@ -121,11 +114,10 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
return err
}
ctx.Domain.Consensus().PopulateMass(domainTransaction)
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
Hash: consensushashing.TransactionHash(domainTransaction).String(),
Mass: domainTransaction.Mass,
Size: estimatedsize.TransactionEstimatedSerializedSize(domainTransaction),
}
if domainBlockHeader != nil {
transaction.VerboseData.BlockHash = consensushashing.HeaderHash(domainBlockHeader).String()


@@ -12,12 +12,8 @@ func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage
banRequest := request.(*appmessage.BanRequestMessage)
ip := net.ParseIP(banRequest.IP)
if ip == nil {
hint := ""
if banRequest.IP[0] == '[' {
hint = " (try to remove “[” and “]” symbols)"
}
errorMessage := &appmessage.BanResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not parse IP%s: %s", hint, banRequest.IP)
errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", banRequest.IP)
return errorMessage, nil
}

Some files were not shown because too many files have changed in this diff.