Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 19:22:53 +00:00

Compare commits (7 commits): v0.10.1-de ... optimize-u
| Author | SHA1 | Date |
|---|---|---|
| | d5f2495522 | |
| | 8588774837 | |
| | 94c3f4b80c | |
| | a5a84e9215 | |
| | 1cec4c91cf | |
| | 3c0b74208a | |
| | 08d983b84a | |
.github/workflows/go-deploy.yml (vendored, 77 changed lines)
@@ -1,77 +0,0 @@
name: Build and Upload assets
on:
  release:
    types: [published]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, windows-latest, macos-latest ]
    name: Building For ${{ matrix.os }}
    steps:
      - name: Fix windows CRLF
        run: git config --global core.autocrlf false

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      # We need to increase the page size because the tests run out of memory on github CI windows.
      # Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
      # MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
      - name: Increase page size on windows
        if: runner.os == 'Windows'
        shell: powershell
        run: powershell -command .\.github\workflows\SetPageFileSize.ps1

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16

      - name: Build on linux
        if: runner.os == 'Linux'
        # `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
        # `-s -w` strips the binary to produce smaller size binaries
        run: |
          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV

      - name: Build on Windows
        if: runner.os == 'Windows'
        shell: bash
        run: |
          go build -v -ldflags="-s -w" -o bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
          powershell "Compress-Archive bin/* \"${archive}\""
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV

      - name: Build on MacOS
        if: runner.os == 'macOS'
        run: |
          go build -v -ldflags="-s -w" -o ./bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV

      - name: Upload Release Asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }}
          asset_path: "./${{ env.archive }}"
          asset_name: "${{ env.asset_name }}"
          asset_content_type: application/zip
.github/workflows/go-race.yml (vendored, 49 changed lines)
@@ -1,49 +0,0 @@
name: Go-Race

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:

jobs:
  race_test:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        branch: [ master, latest ]
    name: Race detection on ${{ matrix.branch }}
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.15

      - name: Set scheduled branch name
        shell: bash
        if: github.event_name == 'schedule'
        run: |
          if [ "${{ matrix.branch }}" == "master" ]; then
            echo "run_on=master" >> $GITHUB_ENV
          fi
          if [ "${{ matrix.branch }}" == "latest" ]; then
            branch=$(git branch -r | grep 'v\([0-9]\+\.\)\([0-9]\+\.\)\([0-9]\+\)-dev' | sort -Vr | head -1 | xargs)
            echo "run_on=${branch}" >> $GITHUB_ENV
          fi

      - name: Set manual branch name
        shell: bash
        if: github.event_name == 'workflow_dispatch'
        run: echo "run_on=${{ github.ref }}" >> $GITHUB_ENV

      - name: Test with race detector
        shell: bash
        run: |
          git checkout "${{ env.run_on }}"
          git status
          go test -race ./...
.github/workflows/go.yml (vendored, 15 changed lines)
@@ -11,9 +11,8 @@ jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-16.04, macos-10.15, windows-2019 ]
        os: [ ubuntu-16.04, macos-10.15 ]
    name: Testing on on ${{ matrix.os }}
    steps:

@@ -35,7 +34,7 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
          go-version: 1.14

      # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules

@@ -49,7 +48,7 @@ jobs:
      - name: Test
        shell: bash
        run: ./build_and_test.sh -v
        run: ./build_and_test.sh

  coverage:
    runs-on: ubuntu-20.04

@@ -61,13 +60,11 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16

      - name: Delete the stability tests from coverage
        run: rm -r stability-tests

          go-version: 1.14

      - name: Create coverage file
        run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
        # Because of https://github.com/golang/go/issues/27333 this seem to "fail" even though nothing is wrong, so ignore the failure
        run: go test -json -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./... || true

      - name: Upload coverage file
        run: bash <(curl -s https://codecov.io/bash)

README.md
@@ -18,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -56,7 +56,7 @@ $ kaspad
```

## Discord
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

@@ -65,7 +65,7 @@ is used for this project.

## Documentation

The [documentation](https://github.com/kaspanet/docs) is a work-in-progress
The documentation is a work-in-progress.

## License
app/app.go (44 changed lines)
@@ -7,20 +7,20 @@ import (
    "runtime"
    "time"

    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/db/database"
    "github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
    "github.com/kaspanet/kaspad/infrastructure/logger"
    "github.com/kaspanet/kaspad/infrastructure/os/execenv"
    "github.com/kaspanet/kaspad/infrastructure/os/limits"

    "github.com/kaspanet/kaspad/infrastructure/os/signal"
    "github.com/kaspanet/kaspad/infrastructure/os/winservice"
    "github.com/kaspanet/kaspad/util/panics"
    "github.com/kaspanet/kaspad/util/profiling"
    "github.com/kaspanet/kaspad/version"
)

const leveldbCacheSizeMiB = 256
    "github.com/kaspanet/kaspad/util/panics"

    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/os/execenv"
    "github.com/kaspanet/kaspad/infrastructure/os/limits"
    "github.com/kaspanet/kaspad/infrastructure/os/winservice"
)

var desiredLimits = &limits.DesiredLimits{
    FileLimitWant: 2048,

@@ -49,7 +49,6 @@ func StartApp() error {
        fmt.Fprintln(os.Stderr, err)
        return err
    }
    defer logger.BackendLog.Close()
    defer panics.HandlePanic(log, "MAIN", nil)

    app := &kaspadApp{cfg: cfg}

@@ -85,6 +84,12 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
        profiling.Start(app.cfg.Profile, log)
    }

    // Perform upgrades to kaspad as new versions require it.
    if err := doUpgrades(); err != nil {
        log.Error(err)
        return err
    }

    // Return now if an interrupt signal was triggered.
    if signal.InterruptRequested(interrupt) {
        return nil

@@ -157,9 +162,15 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
    return nil
}

// doUpgrades performs upgrades to kaspad as new versions require it.
// currently it's a placeholder we got from kaspad upstream, that does nothing
func doUpgrades() error {
    return nil
}

// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
    return filepath.Join(cfg.AppDir, "data")
    return filepath.Join(cfg.DataDir, "db")
}

func removeDatabase(cfg *config.Config) error {

@@ -169,17 +180,6 @@ func removeDatabase(cfg *config.Config) error {

func openDB(cfg *config.Config) (database.Database, error) {
    dbPath := databasePath(cfg)

    err := checkDatabaseVersion(dbPath)
    if err != nil {
        return nil, err
    }

    log.Infof("Loading database from '%s'", dbPath)
    db, err := ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
    if err != nil {
        return nil, err
    }

    return db, nil
    return ldb.NewLevelDB(dbPath)
}
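For orientation, here is a minimal sketch of how the database helpers in the hunks above fit together on the side of the diff that adds the version check and cache-size constant. It assumes it lives in the same `app` package as that code; `withDatabase` is a hypothetical helper, not part of the diff.

```go
// Sketch only: ties openDB (which checks the version file, then opens LevelDB
// with the 256 MiB cache) to the caller's cleanup.
func withDatabase(cfg *config.Config, fn func(database.Database) error) error {
	db, err := openDB(cfg) // uses databasePath(cfg), i.e. <AppDir>/data on this side of the diff
	if err != nil {
		return err
	}
	defer db.Close()
	return fn(db)
}
```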
@@ -3,7 +3,6 @@ package appmessage
import (
    "encoding/hex"
    "github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
    "github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
    "github.com/kaspanet/kaspad/domain/consensus/utils/utxo"

    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

@@ -84,6 +83,7 @@ func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction)
        LockTime:     domainTransaction.LockTime,
        SubnetworkID: domainTransaction.SubnetworkID,
        Gas:          domainTransaction.Gas,
        PayloadHash:  domainTransaction.PayloadHash,
        Payload:      domainTransaction.Payload,
    }
}

@@ -133,6 +133,7 @@ func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
        LockTime:     msgTx.LockTime,
        SubnetworkID: msgTx.SubnetworkID,
        Gas:          msgTx.Gas,
        PayloadHash:  msgTx.PayloadHash,
        Payload:      payload,
    }
}

@@ -163,7 +164,11 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
    inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
    for i, input := range rpcTransaction.Inputs {
        transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
        transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
        if err != nil {
            return nil, err
        }
        transactionID, err := transactionid.FromBytes(transactionIDBytes)
        if err != nil {
            return nil, err
        }

@@ -193,7 +198,19 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
        }
    }

    subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID)
    subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
    if err != nil {
        return nil, err
    }
    subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
    if err != nil {
        return nil, err
    }
    payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
    if err != nil {
        return nil, err
    }
    payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
    if err != nil {
        return nil, err
    }

@@ -209,6 +226,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
        LockTime:     rpcTransaction.LockTime,
        SubnetworkID: *subnetworkID,
        Gas:          rpcTransaction.LockTime,
        PayloadHash:  *payloadHash,
        Payload:      payload,
    }, nil
}

@@ -237,7 +255,8 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
            ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
        }
    }
    subnetworkID := transaction.SubnetworkID.String()
    subnetworkID := hex.EncodeToString(transaction.SubnetworkID[:])
    payloadHash := transaction.PayloadHash.String()
    payload := hex.EncodeToString(transaction.Payload)
    return &RPCTransaction{
        Version: transaction.Version,

@@ -246,6 +265,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
        LockTime:     transaction.LockTime,
        SubnetworkID: subnetworkID,
        Gas:          transaction.LockTime,
        PayloadHash:  payloadHash,
        Payload:      payload,
    }
}

@@ -266,7 +286,7 @@ func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
            outpointAndUTXOEntryPair.UTXOEntry.Amount,
            outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
            outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
            outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
            outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore,
        ),
    }
}

@@ -289,76 +309,9 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
                Amount:          outpointAndUTXOEntryPair.UTXOEntry.Amount(),
                ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(),
                IsCoinbase:      outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(),
                BlockDAAScore:   outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore(),
                BlockBlueScore:  outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore(),
            },
        }
    }
    return domainOutpointAndUTXOEntryPairs
}

// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
    header := &RPCBlockHeader{
        Version:              uint32(block.Header.Version()),
        ParentHashes:         hashes.ToStrings(block.Header.ParentHashes()),
        HashMerkleRoot:       block.Header.HashMerkleRoot().String(),
        AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
        UTXOCommitment:       block.Header.UTXOCommitment().String(),
        Timestamp:            block.Header.TimeInMilliseconds(),
        Bits:                 block.Header.Bits(),
        Nonce:                block.Header.Nonce(),
    }
    transactions := make([]*RPCTransaction, len(block.Transactions))
    for i, transaction := range block.Transactions {
        transactions[i] = DomainTransactionToRPCTransaction(transaction)
    }
    return &RPCBlock{
        Header:       header,
        Transactions: transactions,
    }
}

// RPCBlockToDomainBlock converts `block` into a DomainBlock
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
    parentHashes := make([]*externalapi.DomainHash, len(block.Header.ParentHashes))
    for i, parentHash := range block.Header.ParentHashes {
        domainParentHashes, err := externalapi.NewDomainHashFromString(parentHash)
        if err != nil {
            return nil, err
        }
        parentHashes[i] = domainParentHashes
    }
    hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
    if err != nil {
        return nil, err
    }
    acceptedIDMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.AcceptedIDMerkleRoot)
    if err != nil {
        return nil, err
    }
    utxoCommitment, err := externalapi.NewDomainHashFromString(block.Header.UTXOCommitment)
    if err != nil {
        return nil, err
    }
    header := blockheader.NewImmutableBlockHeader(
        uint16(block.Header.Version),
        parentHashes,
        hashMerkleRoot,
        acceptedIDMerkleRoot,
        utxoCommitment,
        block.Header.Timestamp,
        block.Header.Bits,
        block.Header.Nonce)
    transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
    for i, transaction := range block.Transactions {
        domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
        if err != nil {
            return nil, err
        }
        transactions[i] = domainTransaction
    }
    return &externalapi.DomainBlock{
        Header:       header,
        Transactions: transactions,
    }, nil
}
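One side of the conversion hunks above goes through raw hex rather than the `FromString` helpers. A small sketch of that decode-then-construct pattern in isolation; `decodeTransactionID` is a hypothetical wrapper, and it assumes the same `transactionid`, `hex`, and `externalapi` packages imported in the diff.

```go
// decodeTransactionID mirrors the hex-decode-then-FromBytes pattern used in
// RPCTransactionToDomainTransaction above.
func decodeTransactionID(s string) (*externalapi.DomainTransactionID, error) {
	idBytes, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	return transactionid.FromBytes(idBytes)
}
```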
@@ -59,7 +59,6 @@ const (
    CmdPruningPointHash
    CmdIBDBlockLocator
    CmdIBDBlockLocatorHighestHash
    CmdIBDBlockLocatorHighestHashNotFound
    CmdBlockHeaders
    CmdRequestNextPruningPointUTXOSetChunk
    CmdDonePruningPointUTXOSetChunks

@@ -117,8 +116,6 @@ const (
    CmdNotifyUTXOsChangedRequestMessage
    CmdNotifyUTXOsChangedResponseMessage
    CmdUTXOsChangedNotificationMessage
    CmdStopNotifyingUTXOsChangedRequestMessage
    CmdStopNotifyingUTXOsChangedResponseMessage
    CmdGetUTXOsByAddressesRequestMessage
    CmdGetUTXOsByAddressesResponseMessage
    CmdGetVirtualSelectedParentBlueScoreRequestMessage

@@ -126,17 +123,6 @@ const (
    CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
    CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
    CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
    CmdBanRequestMessage
    CmdBanResponseMessage
    CmdUnbanRequestMessage
    CmdUnbanResponseMessage
    CmdGetInfoRequestMessage
    CmdGetInfoResponseMessage
    CmdNotifyPruningPointUTXOSetOverrideRequestMessage
    CmdNotifyPruningPointUTXOSetOverrideResponseMessage
    CmdPruningPointUTXOSetOverrideNotificationMessage
    CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
    CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
)

// ProtocolMessageCommandToString maps all MessageCommands to their string representation

@@ -170,7 +156,6 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
    CmdPruningPointHash: "PruningPointHash",
    CmdIBDBlockLocator: "IBDBlockLocator",
    CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
    CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
    CmdBlockHeaders: "BlockHeaders",
    CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
    CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",

@@ -228,8 +213,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
    CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest",
    CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse",
    CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification",
    CmdStopNotifyingUTXOsChangedRequestMessage: "StopNotifyingUTXOsChangedRequest",
    CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
    CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
    CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
    CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",

@@ -237,17 +220,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
    CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
    CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse",
    CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification",
    CmdBanRequestMessage: "BanRequest",
    CmdBanResponseMessage: "BanResponse",
    CmdUnbanRequestMessage: "UnbanRequest",
    CmdUnbanResponseMessage: "UnbanResponse",
    CmdGetInfoRequestMessage: "GetInfoRequest",
    CmdGetInfoResponseMessage: "GeInfoResponse",
    CmdNotifyPruningPointUTXOSetOverrideRequestMessage: "NotifyPruningPointUTXOSetOverrideRequest",
    CmdNotifyPruningPointUTXOSetOverrideResponseMessage: "NotifyPruningPointUTXOSetOverrideResponse",
    CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
    CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
    CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
}

// Message is an interface that describes a kaspa message. A type that
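The two command-to-string maps above are how commands are usually rendered for logs and errors. A hedged sketch of that lookup; `commandName` is an illustrative helper, not part of the diff, and assumes the `appmessage` package shown above.

```go
// commandName resolves a MessageCommand to its human-readable name using the
// protocol map first and the RPC map as a fallback.
func commandName(cmd MessageCommand) string {
	if name, ok := ProtocolMessageCommandToString[cmd]; ok {
		return name
	}
	if name, ok := RPCMessageCommandToString[cmd]; ok {
		return name
	}
	return "unknown command"
}
```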
@@ -15,6 +15,19 @@ import (
// backing array multiple times.
const defaultTransactionAlloc = 2048

// MaxMassAcceptedByBlock is the maximum total transaction mass a block may accept.
const MaxMassAcceptedByBlock = 10000000

// MaxMassPerTx is the maximum total mass a transaction may have.
const MaxMassPerTx = MaxMassAcceptedByBlock / 2

// MaxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.
const MaxTxPerBlock = (MaxMassAcceptedByBlock / minTxPayload) + 1

// MaxBlockParents is the maximum allowed number of parents for block.
const MaxBlockParents = 10

// TxLoc holds locator data for the offset and length of where a transaction is
// located within a MsgBlock data buffer.
type TxLoc struct {
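The mass constants added above bound both whole blocks and individual transactions. A hedged sketch of the kind of check they enable; `checkTransactionMass` and its `mass` argument are illustrative only, since the real validation lives in the consensus code, not in this file.

```go
import "fmt"

// checkTransactionMass is an illustrative use of MaxMassPerTx
// (MaxMassAcceptedByBlock / 2, i.e. 5,000,000).
func checkTransactionMass(mass uint64) error {
	if mass > MaxMassPerTx {
		return fmt.Errorf("transaction mass %d exceeds the limit of %d", mass, MaxMassPerTx)
	}
	return nil
}
```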
@@ -11,11 +11,15 @@ import (
    "github.com/davecgh/go-spew/spew"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/util/mstime"
    "github.com/kaspanet/kaspad/util/random"
)

// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
    nonce := uint64(0xba4d87a69924a93d)
    nonce, err := random.Uint64()
    if err != nil {
        t.Errorf("random.Uint64: Error generating nonce: %v", err)
    }

    hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
@@ -1,16 +0,0 @@
package appmessage

// MsgIBDBlockLocatorHighestHashNotFound represents a kaspa BlockLocatorHighestHashNotFound message
type MsgIBDBlockLocatorHighestHashNotFound struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocatorHighestHashNotFound) Command() MessageCommand {
    return CmdIBDBlockLocatorHighestHashNotFound
}

// NewMsgIBDBlockLocatorHighestHashNotFound returns a new IBDBlockLocatorHighestHashNotFound message
func NewMsgIBDBlockLocatorHighestHashNotFound() *MsgIBDBlockLocatorHighestHashNotFound {
    return &MsgIBDBlockLocatorHighestHashNotFound{}
}
@@ -6,12 +6,17 @@ package appmessage
import (
    "testing"

    "github.com/kaspanet/kaspad/util/random"
)

// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
    // Ensure we get the same nonce back out.
    nonce := uint64(0x61c2c5535902862)
    nonce, err := random.Uint64()
    if err != nil {
        t.Errorf("random.Uint64: Error generating nonce: %v", err)
    }
    msg := NewMsgPing(nonce)
    if msg.Nonce != nonce {
        t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
@@ -6,11 +6,16 @@ package appmessage
import (
    "testing"

    "github.com/kaspanet/kaspad/util/random"
)

// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
    nonce := uint64(0x1a05b581a5182c)
    nonce, err := random.Uint64()
    if err != nil {
        t.Errorf("random.Uint64: error generating nonce: %v", err)
    }
    msg := NewMsgPong(nonce)
    if msg.Nonce != nonce {
        t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",
@@ -31,6 +31,6 @@ type OutpointAndUTXOEntryPair struct {
type UTXOEntry struct {
    Amount uint64
    ScriptPublicKey *externalapi.ScriptPublicKey
    BlockDAAScore uint64
    BlockBlueScore uint64
    IsCoinbase bool
}
@@ -6,6 +6,7 @@ package appmessage
import (
    "encoding/binary"
    "github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
    "strconv"

    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"

@@ -132,6 +133,7 @@ type MsgTx struct {
    LockTime uint64
    SubnetworkID externalapi.DomainSubnetworkID
    Gas uint64
    PayloadHash externalapi.DomainHash
    Payload []byte
}

@@ -177,6 +179,7 @@ func (msg *MsgTx) Copy() *MsgTx {
        LockTime:     msg.LockTime,
        SubnetworkID: msg.SubnetworkID,
        Gas:          msg.Gas,
        PayloadHash:  msg.PayloadHash,
    }

    if msg.Payload != nil {

@@ -277,12 +280,18 @@ func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *extern
        txOut = make([]*TxOut, 0, defaultTxInOutAlloc)
    }

    var payloadHash externalapi.DomainHash
    if *subnetworkID != subnetworks.SubnetworkIDNative {
        payloadHash = *hashes.PayloadHash(payload)
    }

    return &MsgTx{
        Version:      version,
        TxIn:         txIn,
        TxOut:        txOut,
        SubnetworkID: *subnetworkID,
        Gas:          gas,
        PayloadHash:  payloadHash,
        Payload:      payload,
        LockTime:     lockTime,
    }
@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {

// TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
    txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
    txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
    txHash1Str := "4bee9ee495bd93a755de428376bd582a2bb6ec37c041753b711c0606d5745c13"
    txID1Str := "f868bd20e816256b80eac976821be4589d24d21141bd1cec6e8005d0c16c6881"
    wantTxID1, err := transactionid.FromString(txID1Str)
    if err != nil {
        t.Fatalf("NewTxIDFromStr: %v", err)

@@ -185,14 +185,14 @@ func TestTxHashAndID(t *testing.T) {
            spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
    }

    hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
    hash2Str := "cb1bdb4a83d4885535fb3cceb5c96597b7df903db83f0ffcd779d703affd8efd"
    wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
    if err != nil {
        t.Errorf("NewTxIDFromStr: %v", err)
        return
    }

    id2Str := "89ffb49474637502d9059af38b8a95fc2f0d3baef5c801d7a9b9c8830671b711"
    id2Str := "ca080073d4ddf5b84443a0964af633f3c70a5b290fd3bc35a7e6f93fd33f9330"
    wantID2, err := transactionid.FromString(id2Str)
    if err != nil {
        t.Errorf("NewTxIDFromStr: %v", err)
@@ -19,7 +19,7 @@ func TestVersion(t *testing.T) {
    // Create version message data.
    tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
    me := NewNetAddress(tcpAddrMe)
    me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
    generatedID, err := id.GenerateID()
    if err != nil {
        t.Fatalf("id.GenerateID: %s", err)
@@ -15,6 +15,9 @@ type NetAddress struct {
    // Last time the address was seen.
    Timestamp mstime.Time

    // Bitfield which identifies the services supported by the address.
    Services ServiceFlag

    // IP address of the peer.
    IP net.IP

@@ -23,6 +26,17 @@ type NetAddress struct {
    Port uint16
}

// HasService returns whether the specified service is supported by the address.
func (na *NetAddress) HasService(service ServiceFlag) bool {
    return na.Services&service == service
}

// AddService adds service as a supported service by the peer generating the
// message.
func (na *NetAddress) AddService(service ServiceFlag) {
    na.Services |= service
}

// TCPAddress converts the NetAddress to *net.TCPAddr
func (na *NetAddress) TCPAddress() *net.TCPAddr {
    return &net.TCPAddr{

@@ -33,19 +47,20 @@ func (na *NetAddress) TCPAddress() *net.TCPAddr {

// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and
// supported services with defaults for the remaining fields.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
    return NewNetAddressTimestamp(mstime.Now(), ip, port)
func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress {
    return NewNetAddressTimestamp(mstime.Now(), services, ip, port)
}

// NewNetAddressTimestamp returns a new NetAddress using the provided
// timestamp, IP, port, and supported services. The timestamp is rounded to
// single millisecond precision.
func NewNetAddressTimestamp(
    timestamp mstime.Time, ip net.IP, port uint16) *NetAddress {
    timestamp mstime.Time, services ServiceFlag, ip net.IP, port uint16) *NetAddress {
    // Limit the timestamp to one millisecond precision since the protocol
    // doesn't support better.
    na := NetAddress{
        Timestamp: timestamp,
        Services:  services,
        IP:        ip,
        Port:      port,
    }

@@ -54,6 +69,6 @@ func NewNetAddressTimestamp(

// NewNetAddress returns a new NetAddress using the provided TCP address and
// supported services with defaults for the remaining fields.
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
    return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress {
    return NewNetAddressIPPort(addr.IP, uint16(addr.Port), services)
}

@@ -15,7 +15,7 @@ func TestNetAddress(t *testing.T) {
    port := 16111

    // Test NewNetAddress.
    na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port})
    na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}, 0)

    // Ensure we get the same ip, port, and services back out.
    if !na.IP.Equal(ip) {

@@ -25,4 +25,21 @@ func TestNetAddress(t *testing.T) {
        t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port,
            port)
    }
    if na.Services != 0 {
        t.Errorf("NetNetAddress: wrong services - got %v, want %v",
            na.Services, 0)
    }
    if na.HasService(SFNodeNetwork) {
        t.Errorf("HasService: SFNodeNetwork service is set")
    }

    // Ensure adding the full service node flag works.
    na.AddService(SFNodeNetwork)
    if na.Services != SFNodeNetwork {
        t.Errorf("AddService: wrong services - got %v, want %v",
            na.Services, SFNodeNetwork)
    }
    if !na.HasService(SFNodeNetwork) {
        t.Errorf("HasService: SFNodeNetwork service not set")
    }
}
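Since `NetAddress` now carries a service bitfield, every constructor shown above takes a `ServiceFlag`. A short usage sketch based only on the API exercised by `TestNetAddress`; the wrapper function is illustrative and assumes it sits inside the same `appmessage` package.

```go
// exampleNetAddressUsage shows the constructor-plus-flag pattern from the diff.
func exampleNetAddressUsage() {
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
	na := NewNetAddress(addr, SFNodeNetwork) // advertise the full-node service
	if !na.HasService(SFNodeNetwork) {
		panic("expected the full-node service flag to be set")
	}
	na.AddService(SFNodeNetwork) // ORing the same flag again is a no-op
	_ = na.TCPAddress()          // round-trip back to *net.TCPAddr
}
```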
@@ -1,39 +0,0 @@
package appmessage

// BanRequestMessage is an appmessage corresponding to
// its respective RPC message
type BanRequestMessage struct {
    baseMessage

    IP string
}

// Command returns the protocol command string for the message
func (msg *BanRequestMessage) Command() MessageCommand {
    return CmdBanRequestMessage
}

// NewBanRequestMessage returns an instance of the message
func NewBanRequestMessage(ip string) *BanRequestMessage {
    return &BanRequestMessage{
        IP: ip,
    }
}

// BanResponseMessage is an appmessage corresponding to
// its respective RPC message
type BanResponseMessage struct {
    baseMessage

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *BanResponseMessage) Command() MessageCommand {
    return CmdBanResponseMessage
}

// NewBanResponseMessage returns a instance of the message
func NewBanResponseMessage() *BanResponseMessage {
    return &BanResponseMessage{}
}
@@ -25,7 +25,7 @@ func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool)
// its respective RPC message
type GetBlockResponseMessage struct {
    baseMessage
    Block *RPCBlock
    BlockVerboseData *BlockVerboseData

    Error *RPCError
}

@@ -39,3 +39,69 @@ func (msg *GetBlockResponseMessage) Command() MessageCommand {
func NewGetBlockResponseMessage() *GetBlockResponseMessage {
    return &GetBlockResponseMessage{}
}

// BlockVerboseData holds verbose data about a block
type BlockVerboseData struct {
    Hash string
    Version uint16
    VersionHex string
    HashMerkleRoot string
    AcceptedIDMerkleRoot string
    UTXOCommitment string
    TxIDs []string
    TransactionVerboseData []*TransactionVerboseData
    Time int64
    Nonce uint64
    Bits string
    Difficulty float64
    ParentHashes []string
    SelectedParentHash string
    BlueScore uint64
    IsHeaderOnly bool
}

// TransactionVerboseData holds verbose data about a transaction
type TransactionVerboseData struct {
    TxID string
    Hash string
    Size uint64
    Version uint16
    LockTime uint64
    SubnetworkID string
    Gas uint64
    PayloadHash string
    Payload string
    TransactionVerboseInputs []*TransactionVerboseInput
    TransactionVerboseOutputs []*TransactionVerboseOutput
    BlockHash string
    Time uint64
    BlockTime uint64
}

// TransactionVerboseInput holds data about a transaction input
type TransactionVerboseInput struct {
    TxID string
    OutputIndex uint32
    ScriptSig *ScriptSig
    Sequence uint64
}

// ScriptSig holds data about a script signature
type ScriptSig struct {
    Asm string
    Hex string
}

// TransactionVerboseOutput holds data about a transaction output
type TransactionVerboseOutput struct {
    Value uint64
    Index uint32
    ScriptPubKey *ScriptPubKeyResult
}

// ScriptPubKeyResult holds data about a script public key
type ScriptPubKeyResult struct {
    Hex string
    Type string
    Address string
}
@@ -27,7 +27,6 @@ type GetBlockDAGInfoResponseMessage struct {
    VirtualParentHashes []string
    Difficulty float64
    PastMedianTime int64
    PruningPointHash string

    Error *RPCError
}
@@ -23,7 +23,7 @@ func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateReque
// its respective RPC message
type GetBlockTemplateResponseMessage struct {
    baseMessage
    Block *RPCBlock
    MsgBlock *MsgBlock
    IsSynced bool

    Error *RPCError

@@ -35,9 +35,9 @@ func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand {
}

// NewGetBlockTemplateResponseMessage returns a instance of the message
func NewGetBlockTemplateResponseMessage(block *RPCBlock, isSynced bool) *GetBlockTemplateResponseMessage {
func NewGetBlockTemplateResponseMessage(msgBlock *MsgBlock, isSynced bool) *GetBlockTemplateResponseMessage {
    return &GetBlockTemplateResponseMessage{
        Block: block,
        MsgBlock: msgBlock,
        IsSynced: isSynced,
    }
}
@@ -4,9 +4,9 @@ package appmessage
// its respective RPC message
type GetBlocksRequestMessage struct {
    baseMessage
    LowHash string
    IncludeBlocks bool
    IncludeTransactionVerboseData bool
    LowHash string
    IncludeBlockHexes bool
    IncludeBlockVerboseData bool
}

// Command returns the protocol command string for the message

@@ -15,12 +15,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
}

// NewGetBlocksRequestMessage returns a instance of the message
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
    includeTransactionVerboseData bool) *GetBlocksRequestMessage {
func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeBlockVerboseData bool) *GetBlocksRequestMessage {
    return &GetBlocksRequestMessage{
        LowHash: lowHash,
        IncludeBlocks: includeBlocks,
        IncludeTransactionVerboseData: includeTransactionVerboseData,
        LowHash: lowHash,
        IncludeBlockHexes: includeBlockHexes,
        IncludeBlockVerboseData: includeBlockVerboseData,
    }
}

@@ -28,8 +27,9 @@ func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
// its respective RPC message
type GetBlocksResponseMessage struct {
    baseMessage
    BlockHashes []string
    Blocks []*RPCBlock
    BlockHashes []string
    BlockHexes []string
    BlockVerboseData []*BlockVerboseData

    Error *RPCError
}

@@ -40,6 +40,12 @@ func (msg *GetBlocksResponseMessage) Command() MessageCommand {
}

// NewGetBlocksResponseMessage returns a instance of the message
func NewGetBlocksResponseMessage() *GetBlocksResponseMessage {
    return &GetBlocksResponseMessage{}
func NewGetBlocksResponseMessage(blockHashes []string, blockHexes []string,
    blockVerboseData []*BlockVerboseData) *GetBlocksResponseMessage {

    return &GetBlocksResponseMessage{
        BlockHashes: blockHashes,
        BlockHexes: blockHexes,
        BlockVerboseData: blockVerboseData,
    }
}
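With the field renames above, callers build the GetBlocks request/response pair through the constructors shown in this hunk. A sketch only, with placeholder values; it assumes the same `appmessage` package.

```go
// exampleGetBlocksRoundTrip shows the new request/response shapes on this branch.
func exampleGetBlocksRoundTrip() {
	// includeBlockHexes=true, includeBlockVerboseData=true
	req := NewGetBlocksRequestMessage("<low hash hex>", true, true)

	resp := NewGetBlocksResponseMessage(
		[]string{"<block hash hex>"}, // BlockHashes
		[]string{"<block hex>"},      // BlockHexes
		[]*BlockVerboseData{},        // BlockVerboseData
	)
	_, _ = req, resp
}
```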
@@ -1,40 +0,0 @@
package appmessage

// GetInfoRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *GetInfoRequestMessage) Command() MessageCommand {
    return CmdGetInfoRequestMessage
}

// NewGetInfoRequestMessage returns a instance of the message
func NewGetInfoRequestMessage() *GetInfoRequestMessage {
    return &GetInfoRequestMessage{}
}

// GetInfoResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoResponseMessage struct {
    baseMessage
    P2PID string
    MempoolSize uint64

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetInfoResponseMessage) Command() MessageCommand {
    return CmdGetInfoResponseMessage
}

// NewGetInfoResponseMessage returns a instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64) *GetInfoResponseMessage {
    return &GetInfoResponseMessage{
        P2PID: p2pID,
        MempoolSize: mempoolSize,
    }
}
@@ -28,8 +28,8 @@ type GetMempoolEntryResponseMessage struct {

// MempoolEntry represents a transaction in the mempool.
type MempoolEntry struct {
    Fee uint64
    Transaction *RPCTransaction
    Fee uint64
    TransactionVerboseData *TransactionVerboseData
}

// Command returns the protocol command string for the message

@@ -38,11 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
}

// NewGetMempoolEntryResponseMessage returns a instance of the message
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
func NewGetMempoolEntryResponseMessage(fee uint64, transactionVerboseData *TransactionVerboseData) *GetMempoolEntryResponseMessage {
    return &GetMempoolEntryResponseMessage{
        Entry: &MempoolEntry{
            Fee: fee,
            Transaction: transaction,
            Fee: fee,
            TransactionVerboseData: transactionVerboseData,
        },
    }
}
@@ -37,7 +37,7 @@ func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage {
// its respective RPC message
type BlockAddedNotificationMessage struct {
    baseMessage
    Block *RPCBlock
    Block *MsgBlock
}

// Command returns the protocol command string for the message

@@ -46,7 +46,7 @@ func (msg *BlockAddedNotificationMessage) Command() MessageCommand {
}

// NewBlockAddedNotificationMessage returns a instance of the message
func NewBlockAddedNotificationMessage(block *RPCBlock) *BlockAddedNotificationMessage {
func NewBlockAddedNotificationMessage(block *MsgBlock) *BlockAddedNotificationMessage {
    return &BlockAddedNotificationMessage{
        Block: block,
    }
@@ -1,83 +0,0 @@
package appmessage

// NotifyPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
    return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}

// NewNotifyPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
func NewNotifyPruningPointUTXOSetOverrideRequestMessage() *NotifyPruningPointUTXOSetOverrideRequestMessage {
    return &NotifyPruningPointUTXOSetOverrideRequestMessage{}
}

// NotifyPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideResponseMessage struct {
    baseMessage
    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
    return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}

// NewNotifyPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
func NewNotifyPruningPointUTXOSetOverrideResponseMessage() *NotifyPruningPointUTXOSetOverrideResponseMessage {
    return &NotifyPruningPointUTXOSetOverrideResponseMessage{}
}

// PruningPointUTXOSetOverrideNotificationMessage is an appmessage corresponding to
// its respective RPC message
type PruningPointUTXOSetOverrideNotificationMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *PruningPointUTXOSetOverrideNotificationMessage) Command() MessageCommand {
    return CmdPruningPointUTXOSetOverrideNotificationMessage
}

// NewPruningPointUTXOSetOverrideNotificationMessage returns a instance of the message
func NewPruningPointUTXOSetOverrideNotificationMessage() *PruningPointUTXOSetOverrideNotificationMessage {
    return &PruningPointUTXOSetOverrideNotificationMessage{}
}

// StopNotifyingPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct {
    baseMessage
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
    return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}

// NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage {
    return &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{}
}

// StopNotifyingPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct {
    baseMessage
    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
    return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}

// NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage {
    return &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{}
}
@@ -1,37 +0,0 @@
package appmessage

// StopNotifyingUTXOsChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedRequestMessage struct {
    baseMessage
    Addresses []string
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedRequestMessage) Command() MessageCommand {
    return CmdStopNotifyingUTXOsChangedRequestMessage
}

// NewStopNotifyingUTXOsChangedRequestMessage returns a instance of the message
func NewStopNotifyingUTXOsChangedRequestMessage(addresses []string) *StopNotifyingUTXOsChangedRequestMessage {
    return &StopNotifyingUTXOsChangedRequestMessage{
        Addresses: addresses,
    }
}

// StopNotifyingUTXOsChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedResponseMessage struct {
    baseMessage
    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedResponseMessage) Command() MessageCommand {
    return CmdStopNotifyingUTXOsChangedResponseMessage
}

// NewStopNotifyingUTXOsChangedResponseMessage returns a instance of the message
func NewStopNotifyingUTXOsChangedResponseMessage() *StopNotifyingUTXOsChangedResponseMessage {
    return &StopNotifyingUTXOsChangedResponseMessage{}
}
@@ -4,7 +4,7 @@ package appmessage
// its respective RPC message
type SubmitBlockRequestMessage struct {
    baseMessage
    Block *RPCBlock
    Block *MsgBlock
}

// Command returns the protocol command string for the message

@@ -13,7 +13,7 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
}

// NewSubmitBlockRequestMessage returns a instance of the message
func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
    return &SubmitBlockRequestMessage{
        Block: block,
    }

@@ -57,35 +57,3 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
    return &SubmitBlockResponseMessage{}
}

// RPCBlock is a kaspad block representation meant to be
// used over RPC
type RPCBlock struct {
    Header *RPCBlockHeader
    Transactions []*RPCTransaction
    VerboseData *RPCBlockVerboseData
}

// RPCBlockHeader is a kaspad block header representation meant to be
// used over RPC
type RPCBlockHeader struct {
    Version uint32
    ParentHashes []string
    HashMerkleRoot string
    AcceptedIDMerkleRoot string
    UTXOCommitment string
    Timestamp int64
    Bits uint32
    Nonce uint64
}

// RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct {
    Hash string
    Difficulty float64
    SelectedParentHash string
    TransactionIDs []string
    IsHeaderOnly bool
    BlueScore uint64
    ChildrenHashes []string
}
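For reference, a sketch of how the `RPCBlock` family of structs defined in the hunk above is assembled. All field values are placeholders; the helper is illustrative only and assumes the same `appmessage` package.

```go
// exampleRPCBlock builds a minimal RPCBlock using the struct shapes above.
func exampleRPCBlock() *RPCBlock {
	return &RPCBlock{
		Header: &RPCBlockHeader{
			Version:              1,
			ParentHashes:         []string{"<parent hash hex>"},
			HashMerkleRoot:       "<hash merkle root hex>",
			AcceptedIDMerkleRoot: "<accepted ID merkle root hex>",
			UTXOCommitment:       "<utxo commitment hex>",
			Timestamp:            0,
			Bits:                 0,
			Nonce:                0,
		},
		Transactions: []*RPCTransaction{},
	}
}
```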
@@ -49,8 +49,8 @@ type RPCTransaction struct {
    LockTime uint64
    SubnetworkID string
    Gas uint64
    PayloadHash string
    Payload string
    VerboseData *RPCTransactionVerboseData
}

// RPCTransactionInput is a kaspad transaction input representation

@@ -59,7 +59,6 @@ type RPCTransactionInput struct {
    PreviousOutpoint *RPCOutpoint
    SignatureScript string
    Sequence uint64
    VerboseData *RPCTransactionInputVerboseData
}

// RPCScriptPublicKey is a kaspad ScriptPublicKey representation

@@ -73,7 +72,6 @@ type RPCScriptPublicKey struct {
type RPCTransactionOutput struct {
    Amount uint64
    ScriptPublicKey *RPCScriptPublicKey
    VerboseData *RPCTransactionOutputVerboseData
}

// RPCOutpoint is a kaspad outpoint representation meant to be used

@@ -88,25 +86,6 @@ type RPCOutpoint struct {
type RPCUTXOEntry struct {
    Amount uint64
    ScriptPublicKey *RPCScriptPublicKey
    BlockDAAScore uint64
    BlockBlueScore uint64
    IsCoinbase bool
}

// RPCTransactionVerboseData holds verbose data about a transaction
type RPCTransactionVerboseData struct {
    TransactionID string
    Hash string
    Size uint64
    BlockHash string
    BlockTime uint64
}

// RPCTransactionInputVerboseData holds data about a transaction input
type RPCTransactionInputVerboseData struct {
}

// RPCTransactionOutputVerboseData holds data about a transaction output
type RPCTransactionOutputVerboseData struct {
    ScriptPublicKeyType string
    ScriptPublicKeyAddress string
}
@@ -1,39 +0,0 @@
package appmessage

// UnbanRequestMessage is an appmessage corresponding to
// its respective RPC message
type UnbanRequestMessage struct {
    baseMessage

    IP string
}

// Command returns the protocol command string for the message
func (msg *UnbanRequestMessage) Command() MessageCommand {
    return CmdUnbanRequestMessage
}

// NewUnbanRequestMessage returns an instance of the message
func NewUnbanRequestMessage(ip string) *UnbanRequestMessage {
    return &UnbanRequestMessage{
        IP: ip,
    }
}

// UnbanResponseMessage is an appmessage corresponding to
// its respective RPC message
type UnbanResponseMessage struct {
    baseMessage

    Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *UnbanResponseMessage) Command() MessageCommand {
    return CmdUnbanResponseMessage
}

// NewUnbanResponseMessage returns a instance of the message
func NewUnbanResponseMessage() *UnbanResponseMessage {
    return &UnbanResponseMessage{}
}
@@ -4,19 +4,23 @@ import (
    "fmt"
    "sync/atomic"

    "github.com/kaspanet/kaspad/domain/utxoindex"

    infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"

    "github.com/kaspanet/kaspad/domain"

    "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"

    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"

    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol"
    "github.com/kaspanet/kaspad/app/rpc"
    "github.com/kaspanet/kaspad/domain"
    "github.com/kaspanet/kaspad/domain/consensus"
    "github.com/kaspanet/kaspad/domain/utxoindex"
    "github.com/kaspanet/kaspad/infrastructure/config"
    infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
    "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
    "github.com/kaspanet/kaspad/infrastructure/network/connmanager"
    "github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
    "github.com/kaspanet/kaspad/util/panics"
)

@@ -68,8 +72,6 @@ func (a *ComponentManager) Stop() {
        log.Errorf("Error stopping the net adapter: %+v", err)
    }

    a.protocolManager.Close()

    return
}

@@ -78,13 +80,7 @@ func (a *ComponentManager) Stop() {
func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) (
    *ComponentManager, error) {

    consensusConfig := consensus.Config{
        Params: *cfg.ActiveNetParams,
        IsArchival: cfg.IsArchivalNode,
        EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet,
    }

    domain, err := domain.New(&consensusConfig, db)
    domain, err := domain.New(cfg.ActiveNetParams, db, cfg.IsArchivalNode)
    if err != nil {
        return nil, err
    }

@@ -94,18 +90,14 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
        return nil, err
    }

    addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg), db)
    addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg))
    if err != nil {
        return nil, err
    }

    var utxoIndex *utxoindex.UTXOIndex
    if cfg.UTXOIndex {
        utxoIndex, err = utxoindex.New(domain.Consensus(), db)
        if err != nil {
            return nil, err
        }

        utxoIndex = utxoindex.New(domain.Consensus(), db)
        log.Infof("UTXO index started")
    }

@@ -152,14 +144,13 @@ func setupRPC(
        shutDownChan,
    )
    protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
    protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)

    return rpcManager
}

func (a *ComponentManager) maybeSeedFromDNS() {
    if !a.cfg.DisableDNSSeed {
        dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, false, nil,
        dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
            a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
                // Kaspad uses a lookup of the dns seeder here. Since seeder returns
                // IPs of nodes and not its own IP, we can not know real IP of

@@ -167,7 +158,7 @@ func (a *ComponentManager) maybeSeedFromDNS() {
                a.addressManager.AddAddresses(addresses...)
            })

        dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, false, nil,
        dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
            func(addresses []*appmessage.NetAddress) {
                a.addressManager.AddAddresses(addresses...)
            })
@@ -1,57 +0,0 @@
package app

import (
"os"
"path"
"strconv"

"github.com/pkg/errors"
)

const currentDatabaseVersion = 1

func checkDatabaseVersion(dbPath string) (err error) {
versionFileName := versionFilePath(dbPath)

versionBytes, err := os.ReadFile(versionFileName)
if err != nil {
if os.IsNotExist(err) { // If version file doesn't exist, we assume that the database is new
return createDatabaseVersionFile(dbPath, versionFileName)
}
return err
}

databaseVersion, err := strconv.Atoi(string(versionBytes))
if err != nil {
return err
}

if databaseVersion != currentDatabaseVersion {
// TODO: Once there's more then one database version, it might make sense to add upgrade logic at this point
return errors.Errorf("Invalid database version %d. Expected version: %d", databaseVersion, currentDatabaseVersion)
}

return nil
}

func createDatabaseVersionFile(dbPath string, versionFileName string) error {
err := os.MkdirAll(dbPath, 0700)
if err != nil {
return err
}

versionFile, err := os.Create(versionFileName)
if err != nil {
return nil
}
defer versionFile.Close()

versionString := strconv.Itoa(currentDatabaseVersion)
_, err = versionFile.Write([]byte(versionString))
return err
}

func versionFilePath(dbPath string) string {
dbVersionFileName := path.Join(dbPath, "version")
return dbVersionFileName
}
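For context, a helper like the dbversion file shown above (removed on one side of this compare) is normally invoked once at startup, before the database is opened. The sketch below is illustrative only: the wrapper function name and call site are assumptions, not code from this repository, and since checkDatabaseVersion is unexported the caller would have to live in the same app package.

```go
package app // illustrative only; checkDatabaseVersion is unexported, so a caller must be in this package

// openDatabaseIfVersionMatches is a hypothetical wrapper showing the intended
// call order: verify the version file first, then open the database.
func openDatabaseIfVersionMatches(dbPath string) error {
	// Refuse to proceed if the on-disk database was written by an
	// incompatible version of the node.
	if err := checkDatabaseVersion(dbPath); err != nil {
		return err
	}
	// ... open the database at dbPath here (omitted) ...
	return nil
}
```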
@@ -7,6 +7,8 @@ package app

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("KASD")
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

58
app/protocol/blocklogger/blocklogger.go
Normal file
@@ -0,0 +1,58 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blocklogger

import (
"sync"
"time"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
)

var (
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime = mstime.Now()
mtx sync.Mutex
)

// LogBlock logs a new block blue score as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *externalapi.DomainBlock) {
mtx.Lock()
defer mtx.Unlock()

receivedLogBlocks++
receivedLogTx += int64(len(block.Transactions))

now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return
}

// Truncate the duration to 10s of milliseconds.
tDuration := duration.Round(10 * time.Millisecond)

// Log information about new block blue score.
blockStr := "blocks"
if receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if receivedLogTx == 1 {
txStr = "transaction"
}

log.Infof("Processed %d %s in the last %s (%d %s, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, mstime.UnixMilliseconds(block.Header.TimeInMilliseconds()))

receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
}
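Usage note: the blocklogger package above keeps its counters in package-level state and rate-limits its own output, so callers just invoke LogBlock for every block they process. A minimal sketch of such a call site follows; the wrapping function, package name, and blocks channel are assumptions for illustration, not code from this repository.

```go
package example // hypothetical package, for illustration only

import (
	"github.com/kaspanet/kaspad/app/protocol/blocklogger"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// logProcessedBlocks is a hypothetical caller that reports progress for every
// block it receives. LogBlock limits itself to one log line per 10 seconds,
// so calling it on every block is cheap.
func logProcessedBlocks(blocks <-chan *externalapi.DomainBlock) {
	for block := range blocks {
		blocklogger.LogBlock(block)
	}
}
```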
@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)

var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
@@ -1,8 +1,8 @@
package flowcontext

import (
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/pkg/errors"

@@ -37,14 +37,14 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
}

allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
for i, newBlock := range newBlocks {
blocklogger.LogBlock(block)

log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil {
return err
}
allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)

if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
@@ -56,22 +56,13 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
}
}

return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
}

// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
if f.onPruningPointUTXOSetOverrideHandler != nil {
return f.onPruningPointUTXOSetOverrideHandler()
}
return nil
}

func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
block *externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {

f.updateTransactionsToRebroadcast(addedBlocks)
f.updateTransactionsToRebroadcast(block)

// Don't relay transactions when in IBD.
if f.IsIBDRunning() {
@@ -110,10 +101,6 @@ func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks

// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(false, "cannot add header only block")
}

blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {

@@ -18,11 +18,11 @@ import (
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
if !isErrRouteClosed {
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}

log.Errorf("error from %s: %s", flowName, err)
log.Errorf("error from %s: %+v", flowName, err)
}

if atomic.AddUint32(isStopping, 1) == 1 {

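The HandleError hunk above hinges on how errors.As matches its target: a value-typed target matches errors stored as values, while a pointer-typed target matches errors stored as pointers. The self-contained sketch below illustrates the pointer-target pattern with a made-up error type; it is not kaspad's ProtocolError and is only an assumption-free demonstration of the standard library behaviour.

```go
package main

import (
	"errors"
	"fmt"
)

// flowError is a stand-in error type for illustration; it implements error on
// a pointer receiver, so errors.As must be given a **flowError-style target.
type flowError struct{ msg string }

func (e *flowError) Error() string { return e.msg }

func main() {
	err := fmt.Errorf("wrapped: %w", &flowError{msg: "bad message"})

	// target is *flowError; passing &target lets errors.As unwrap err and
	// fill in the matching pointer-typed error from the chain.
	var target *flowError
	if errors.As(err, &target) {
		fmt.Println("matched flow error:", target.msg)
	}
}
```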
@@ -23,10 +23,6 @@ import (
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error

// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.
type OnPruningPointUTXOSetOverrideHandler func() error

// OnTransactionAddedToMempoolHandler is a handler function that's triggered
// when a transaction is added to the mempool
type OnTransactionAddedToMempoolHandler func()
@@ -42,9 +38,8 @@ type FlowContext struct {

timeStarted int64

onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler

transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
@@ -61,8 +56,6 @@ type FlowContext struct {

orphans map[externalapi.DomainHash]*externalapi.DomainBlock
orphansMutex sync.RWMutex

shutdownChan chan struct{}
}

// New returns a new instance of FlowContext.
@@ -81,31 +74,14 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
shutdownChan: make(chan struct{}),
}
}

// Close signals to all flows the the protocol manager is closed.
func (f *FlowContext) Close() {
close(f.shutdownChan)
}

// ShutdownChan is a chan where flows can subscribe to shutdown
// event.
func (f *FlowContext) ShutdownChan() <-chan struct{} {
return f.shutdownChan
}

// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
}

// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
}

// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
func (f *FlowContext) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler) {
f.onTransactionAddedToMempoolHandler = onTransactionAddedToMempoolHandler

@@ -2,6 +2,8 @@ package flowcontext

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -194,9 +194,6 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa

if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
roots = append(roots, current)
} else {
log.Debugf("Block %s was skipped when checking for orphan roots: "+
"exists: %t, status: %s", current, blockInfo.Exists, blockInfo.BlockStatus)
}
continue
}

@@ -25,17 +25,14 @@ func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
return f.Broadcast(inv)
}

func (f *FlowContext) updateTransactionsToRebroadcast(addedBlocks []*externalapi.DomainBlock) {
func (f *FlowContext) updateTransactionsToRebroadcast(block *externalapi.DomainBlock) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()

for _, block := range addedBlocks {
// Note: if a transaction is included in the DAG but not accepted,
// it won't be rebroadcast anymore, although it is not included in
// the UTXO set
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
}
// Note: if the block is red, its transactions won't be rebroadcasted
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
}
}

@@ -35,5 +35,6 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}

return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
context.AddressManager().AddAddresses(msgAddresses.AddressList...)
return nil
}

@@ -70,14 +70,8 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
}

if !foundHighestHashInTheSelectedParentChainOfTargetHash {
log.Warnf("no hash was found in the blockLocator "+
return protocolerrors.Errorf(true, "no hash was found in the blockLocator "+
"that was in the selected parent chain of targetHash %s", targetHash)

ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
if err != nil {
return err
}
}
}
}

@@ -24,7 +24,6 @@ type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
OnPruningPointUTXOSetOverride() error
SharedRequestedBlocks() *SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
@@ -105,19 +104,9 @@ func (flow *handleRelayInvsFlow) start() error {
continue
}

err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}

log.Debugf("Processing block %s", inv.Hash)
missingParents, blockInsertionResult, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
log.Infof("Ignoring pruned block %s", inv.Hash)
continue
}

if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Infof("Ignoring duplicate block %s", inv.Hash)
continue
@@ -146,15 +135,6 @@ func (flow *handleRelayInvsFlow) start() error {
}
}

func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
if len(block.Transactions) == 0 {
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
consensushashing.BlockHash(block))
}

return nil
}

func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock

@@ -17,7 +17,7 @@ type RequestIBDBlocksContext interface {
Domain() domain.Domain
}

type handleRequestHeadersFlow struct {
type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
@@ -27,7 +27,7 @@ type handleRequestHeadersFlow struct {
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {

flow := &handleRequestHeadersFlow{
flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
@@ -36,7 +36,7 @@ func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router
return flow.start()
}

func (flow *handleRequestHeadersFlow) start() error {
func (flow *handleRequestBlocksFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {
@@ -50,7 +50,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
const maxBlueScoreDifference = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
blockHashes, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
if err != nil {
return err
}

@@ -28,14 +28,10 @@ func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.Domain
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)

log.Debugf("Syncing headers up to %s", highHash)
headersSynced, err := flow.syncHeaders(highHash)
err := flow.syncHeaders(highHash)
if err != nil {
return err
}
if !headersSynced {
log.Debugf("Aborting IBD because the headers failed to sync")
return nil
}
log.Debugf("Finished syncing headers up to %s", highHash)

log.Debugf("Syncing the current pruning point UTXO set")
@@ -59,61 +55,47 @@ func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.Domain
return nil
}

// syncHeaders attempts to sync headers from the peer. This method may fail
// because the peer and us have conflicting pruning points. In that case we
// return (false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) (bool, error) {
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return false, err
}
if !highestSharedBlockFound {
return false, nil
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) error {
highHashReceived := false
for !highHashReceived {
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

err = flow.downloadHeaders(highestSharedBlockHash, highHash)
if err != nil {
return false, err
}
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}

// If the highHash has not been received, the peer is misbehaving
highHashBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
if err != nil {
return false, err
// We're finished once highHash has been inserted into the DAG
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
if err != nil {
return err
}
highHashReceived = blockInfo.Exists
log.Debugf("Headers downloaded from peer %s. Are further headers required: %t", flow.peer, !highHashReceived)
}
if !highHashBlockInfo.Exists {
return false, protocolerrors.Errorf(true, "did not receive "+
"highHash header %s from peer %s during header download", highHash, flow.peer)
}
log.Debugf("Headers downloaded from peer %s", flow.peer)
return true, nil
return nil
}

// findHighestSharedBlock attempts to find the highest shared block between the peer
// and this node. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {

func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(targetHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, false, err
return nil, err
}

for {
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
highestHash, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, false, err
}
if !highestHashFound {
return nil, false, nil
return nil, err
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, false, err
return nil, err
}

if highestHashIndex == 0 ||

@@ -122,7 +104,7 @@ func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
// an endless loop, we explicitly stop the loop in such situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {

return highestHash, true, nil
return highestHash, nil
}

locatorHashAboveHighestHash := highestHash
@@ -132,7 +114,7 @@ func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(

blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, false, err
return nil, err
}
}
}
@@ -177,35 +159,27 @@ func (flow *handleRelayInvsFlow) findHighestHashIndex(
return highestHashIndex, nil
}

// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleRelayInvsFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, error) {

ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, false, err
return nil, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlockLocatorHighestHash:
highestHash := message.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

return highestHash, true, nil
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
log.Debugf("Peer %s does not know any block within our blockLocator. "+
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
return nil, false, nil
default:
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

return highestHash, nil
}

func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
@@ -221,6 +195,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
doneChan := make(chan interface{})
spawn("handleRelayInvsFlow-downloadHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
@@ -229,7 +204,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
return
}
if doneIBD {
close(blockHeadersMessageChan)
doneChan <- struct{}{}
return
}

@@ -245,10 +220,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa

for {
select {
case blockHeadersMessage, ok := <-blockHeadersMessageChan:
if !ok {
return nil
}
case blockHeadersMessage := <-blockHeadersMessageChan:
for _, header := range blockHeadersMessage.BlockHeaders {
err = flow.processHeader(header)
if err != nil {
@@ -257,6 +229,8 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
}
case err := <-errChan:
return err
case <-doneChan:
return nil
}
}
}
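The downloadHeaders hunk above shows two ways for a producer goroutine to signal completion to the consuming select loop: sending on a dedicated done channel versus simply closing the data channel. The sketch below is a generic, self-contained illustration of the close-based variant only; the names and the int payload are made up and are not kaspad code.

```go
package main

import "fmt"

// produce sends a few items and then closes the channel; closing is the
// completion signal the consumer watches for.
func produce(out chan<- int) {
	for i := 0; i < 3; i++ {
		out <- i
	}
	close(out)
}

func main() {
	items := make(chan int, 2)
	go produce(items)

	for {
		item, ok := <-items
		if !ok {
			// The channel was closed: the producer is done.
			fmt.Println("done")
			return
		}
		fmt.Println("got", item)
	}
}
```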
@@ -410,11 +384,6 @@ func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externala
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}

err = flow.OnPruningPointUTXOSetOverride()
if err != nil {
return false, err
}

return true, nil
}

@@ -499,13 +468,6 @@ func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.Do
if err != nil {
return err
}
if len(hashes) == 0 {
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
// In these cases - GetMissingBlockBodyHashes would return an empty array.
log.Debugf("No missing block body hashes found.")
return nil
}

for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
@@ -538,11 +500,6 @@ func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.Do
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}

err = flow.banIfBlockIsHeaderOnly(block)
if err != nil {
return err
}

blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -1,28 +0,0 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
Domain() domain.Domain
}

// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {

virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
if err != nil {
return err
}

log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)

virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
return outgoingRoute.Enqueue(virtualSelectedParentInv)
}

@@ -89,10 +89,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
}

if peerAddress != nil {
err := context.AddressManager().AddAddresses(peerAddress)
if err != nil {
return nil, err
}
context.AddressManager().AddAddresses(peerAddress)
}
return peer, nil
}
@@ -107,7 +104,7 @@ func handleError(err error, flowName string, isStopping *uint32, errChan chan er
return
}

if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
log.Errorf("Handshake protocol error from %s: %s", flowName, err)
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -60,7 +60,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
}

if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
return nil, protocolerrors.New(false, "connected to self")
return nil, protocolerrors.New(true, "connected to self")
}

// Disconnect and ban peers from a different network

@@ -13,7 +13,6 @@ import (

// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
ShutdownChan() <-chan struct{}
}

type sendPingsFlow struct {
@@ -40,13 +39,7 @@ func (flow *sendPingsFlow) start() error {
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()

for {
select {
case <-flow.ShutdownChan():
return nil
case <-ticker.C:
}

for range ticker.C {
nonce, err := random.Uint64()
if err != nil {
return err
@@ -69,4 +62,5 @@ func (flow *sendPingsFlow) start() error {
}
flow.peer.SetPingIdle()
}
return nil
}

@@ -1,15 +1,14 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
|
||||
pErr := protocolerrors.ProtocolError{}
|
||||
pErr := &protocolerrors.ProtocolError{}
|
||||
if errors.As(err, &pErr) != isProtocolError {
|
||||
t.Fatalf("Unexepcted error %+v", err)
|
||||
}
|
||||
|
||||
@@ -2,15 +2,10 @@ package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||
@@ -23,21 +18,10 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var headerOnlyBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var orphanBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
@@ -49,7 +33,6 @@ var orphanBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var validPruningPointBlock = &externalapi.DomainBlock{
|
||||
@@ -63,7 +46,6 @@ var validPruningPointBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var invalidPruningPointBlock = &externalapi.DomainBlock{
|
||||
@@ -77,7 +59,6 @@ var invalidPruningPointBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var unexpectedIBDBlock = &externalapi.DomainBlock{
|
||||
@@ -91,7 +72,6 @@ var unexpectedIBDBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var invalidBlock = &externalapi.DomainBlock{
|
||||
@@ -105,7 +85,6 @@ var invalidBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var unknownBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
|
||||
@@ -114,7 +93,6 @@ var validPruningPointHash = consensushashing.BlockHash(validPruningPointBlock)
|
||||
var invalidBlockHash = consensushashing.BlockHash(invalidBlock)
|
||||
var invalidPruningPointHash = consensushashing.BlockHash(invalidPruningPointBlock)
|
||||
var orphanBlockHash = consensushashing.BlockHash(orphanBlock)
|
||||
var headerOnlyBlockHash = consensushashing.BlockHash(headerOnlyBlock)
|
||||
|
||||
type fakeRelayInvsContext struct {
|
||||
testName string
|
||||
@@ -127,23 +105,6 @@ type fakeRelayInvsContext struct {
|
||||
validateAndInsertImportedPruningPointResponse error
|
||||
getBlockInfoResponse *externalapi.BlockInfo
|
||||
validateAndInsertBlockResponse error
|
||||
rwLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockRelations(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, *externalapi.DomainHash, []*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) OnPruningPointUTXOSetOverride() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetVirtualUTXOs(expectedVirtualParents []*externalapi.DomainHash, fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
|
||||
@@ -167,8 +128,6 @@ func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash)
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
if f.getBlockInfoResponse != nil {
|
||||
return f.getBlockInfoResponse, nil
|
||||
}
|
||||
@@ -182,7 +141,7 @@ func (f *fakeRelayInvsContext) GetBlockAcceptanceData(blockHash *externalapi.Dom
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) (hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) {
|
||||
func (f *fakeRelayInvsContext) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
@@ -208,8 +167,6 @@ func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEn
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.validateAndInsertImportedPruningPointResponse
|
||||
}
|
||||
|
||||
@@ -222,16 +179,12 @@ func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
@@ -250,8 +203,6 @@ func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.isValidPruningPointResponse, nil
|
||||
}
|
||||
|
||||
@@ -280,8 +231,6 @@ func (f *fakeRelayInvsContext) Domain() domain.Domain {
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Config() *config.Config {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return &config.Config{
|
||||
Flags: &config.Flags{
|
||||
NetworkFlags: config.NetworkFlags{
|
||||
@@ -320,59 +269,13 @@ func (f *fakeRelayInvsContext) IsIBDRunning() bool {
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.trySetIBDRunningResponse
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) UnsetIBDRunning() {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
close(f.finishedIBD)
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetValidateAndInsertBlockResponse(err error) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.validateAndInsertBlockResponse = err
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetValidateAndInsertImportedPruningPointResponse(err error) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.validateAndInsertImportedPruningPointResponse = err
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetGetBlockInfoResponse(info externalapi.BlockInfo) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.getBlockInfoResponse = &info
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetTrySetIBDRunningResponse(b bool) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.trySetIBDRunningResponse = b
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetIsValidPruningPointResponse(b bool) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.isValidPruningPointResponse = b
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetGenesisHeader() externalapi.BlockHeader {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.params.GenesisBlock.Header
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetFinishedIBDChan() chan struct{} {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.finishedIBD
|
||||
}
|
||||
|
||||
func TestHandleRelayInvs(t *testing.T) {
|
||||
triggerIBD := func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
|
||||
@@ -386,7 +289,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
|
||||
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
defer func() {
|
||||
context.validateAndInsertBlockResponse = nil
|
||||
}()
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
@@ -436,10 +342,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
name: "sending a known invalid inv",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusInvalid,
|
||||
})
|
||||
}
|
||||
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
|
||||
if err != nil {
|
||||
@@ -482,29 +388,6 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "got unrequested block",
|
||||
},
|
||||
{
|
||||
name: "sending header only block on relay",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(headerOnlyBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(headerOnlyBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "block where expected block with body",
|
||||
},
|
||||
{
|
||||
name: "sending invalid block",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
@@ -519,7 +402,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -543,7 +426,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
|
||||
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -569,7 +452,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
{
|
||||
name: "starting IBD when peer is already in IBD",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
context.SetTrySetIBDRunningResponse(false)
|
||||
context.trySetIBDRunningResponse = false
|
||||
triggerIBD(t, incomingRoute, outgoingRoute, context)
|
||||
|
||||
checkNoActivity(t, outgoingRoute)
|
||||
@@ -675,15 +558,15 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -698,10 +581,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -715,7 +598,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -747,11 +630,11 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrDuplicateBlock)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrDuplicateBlock
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -766,10 +649,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -783,7 +666,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -815,7 +698,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
@@ -855,10 +738,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -907,10 +790,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -923,7 +806,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -957,10 +840,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1022,10 +905,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1085,10 +968,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1154,10 +1037,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1181,7 +1064,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1221,10 +1104,10 @@ func TestHandleRelayInvs(t *testing.T) {

// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1248,7 +1131,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)

context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrSuggestedPruningViolatesFinality)
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrSuggestedPruningViolatesFinality
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1285,10 +1168,10 @@ func TestHandleRelayInvs(t *testing.T) {

// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1364,10 +1247,10 @@ func TestHandleRelayInvs(t *testing.T) {

// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1441,10 +1324,10 @@ func TestHandleRelayInvs(t *testing.T) {

// This is done so it'll think it added the high hash to the DAG and proceed with fetching
// the pruning point UTXO set.
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
context.getBlockInfoResponse = &externalapi.BlockInfo{
Exists: true,
BlockStatus: externalapi.StatusHeaderOnly,
})
}

err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
@@ -1484,7 +1367,7 @@ func TestHandleRelayInvs(t *testing.T) {
}
_ = msg.(*appmessage.MsgRequestIBDBlocks)

context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
@@ -1497,7 +1380,7 @@ func TestHandleRelayInvs(t *testing.T) {
},
}

testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
for _, test := range tests {

// This is done to avoid race condition
@@ -1512,7 +1395,7 @@ func TestHandleRelayInvs(t *testing.T) {
errChan := make(chan error)
context := &fakeRelayInvsContext{
testName: test.name,
params: &consensusConfig.Params,
params: params,
finishedIBD: make(chan struct{}),

trySetIBDRunningResponse: true,
@@ -1528,17 +1411,17 @@ func TestHandleRelayInvs(t *testing.T) {
select {
case err := <-errChan:
checkFlowError(t, err, test.expectsProtocolError, test.expectsBan, test.expectsErrToContain)
case <-time.After(10 * time.Second):
t.Fatalf("waiting for error timed out after %s", 10*time.Second)
case <-time.After(time.Second):
t.Fatalf("waiting for error timed out after %s", time.Second)
}
}

select {
case <-context.GetFinishedIBDChan():
case <-context.finishedIBD:
if !test.expectsIBDToFinish {
t.Fatalf("IBD unexpecetedly finished")
}
case <-time.After(10 * time.Second):
case <-time.After(time.Second):
if test.expectsIBDToFinish {
t.Fatalf("IBD didn't finished after %d", time.Second)
}
@@ -1553,7 +1436,7 @@ func TestHandleRelayInvs(t *testing.T) {
if !errors.Is(err, router.ErrRouteClosed) {
t.Fatalf("unexpected error %+v", err)
}
case <-time.After(10 * time.Second):
case <-time.After(time.Second):
t.Fatalf("waiting for flow to finish timed out after %s", time.Second)
}
}
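The fake context in this test replaces direct field writes such as context.getBlockInfoResponse with setters like SetGetBlockInfoResponse; per the "avoid race condition" comment, the point is that the test goroutine and the flow goroutine never touch the same field without synchronization. A minimal sketch of that pattern, with illustrative type and field names rather than the kaspad ones:

package fakes

import "sync"

// blockInfo is a stand-in for the consensus block-info response used above.
type blockInfo struct {
	Exists     bool
	HeaderOnly bool
}

// fakeContext mimics the test double: responses are written and read under a
// mutex so the test goroutine and the flow goroutine don't race.
type fakeContext struct {
	mu                   sync.Mutex
	getBlockInfoResponse *blockInfo
}

// SetGetBlockInfoResponse is called from the test goroutine.
func (f *fakeContext) SetGetBlockInfoResponse(info blockInfo) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.getBlockInfoResponse = &info
}

// GetBlockInfo is called from the flow goroutine.
func (f *fakeContext) GetBlockInfo() *blockInfo {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.getBlockInfoResponse
}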
@@ -1,16 +1,15 @@
package testing

import (
"testing"
"time"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
"time"
)

type fakeReceiveAddressesContext struct{}
@@ -20,7 +19,7 @@ func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressMan
}

func TestReceiveAddressesErrors(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
incomingRoute := router.NewRoute()
outgoingRoute := router.NewRoute()
peer := peerpkg.New(nil)

@@ -1,4 +0,0 @@
package testing

// Because of a bug in Go coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333
// So this is a dummy non-test go file in the package.
@@ -191,7 +191,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
continue
}

return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
return protocolerrors.Errorf(true, "rejected transaction %s", txID)
}
err = flow.broadcastAcceptedTransactions([]*externalapi.DomainTransactionID{txID})
if err != nil {

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -2,9 +2,6 @@ package protocol

import (
"fmt"
"github.com/pkg/errors"
"sync"
"sync/atomic"

"github.com/kaspanet/kaspad/domain"

@@ -20,9 +17,7 @@ import (

// Manager manages the p2p protocol
type Manager struct {
context *flowcontext.FlowContext
routersWaitGroup sync.WaitGroup
isClosed uint32
context *flowcontext.FlowContext
}

// NewManager creates a new instance of the p2p protocol manager
@@ -37,18 +32,6 @@ func NewManager(cfg *config.Config, domain domain.Domain, netAdapter *netadapter
return &manager, nil
}

// Close closes the protocol manager and waits until all p2p flows
// finish.
func (m *Manager) Close() {
if !atomic.CompareAndSwapUint32(&m.isClosed, 0, 1) {
panic(errors.New("The protocol manager was already closed"))
}

atomic.StoreUint32(&m.isClosed, 1)
m.context.Close()
m.routersWaitGroup.Wait()
}

// Peers returns the currently active peers
func (m *Manager) Peers() []*peerpkg.Peer {
return m.context.Peers()
@@ -70,13 +53,11 @@ func (m *Manager) AddBlock(block *externalapi.DomainBlock) error {
return m.context.AddBlock(block)
}

func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
flowsWaitGroup.Add(len(flows))
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error) error {
for _, flow := range flows {
executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
spawn(fmt.Sprintf("flow-%s", flow.name), func() {
executeFunc(peer)
flowsWaitGroup.Done()
})
}
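One side of the runFlows hunk above threads a *sync.WaitGroup through the call so the manager can wait for every flow goroutine to exit. A standalone sketch of that spawn-and-wait shape; flow and the plain go statement here are simplified stand-ins for the kaspad types and its spawn helper:

package flows

import (
	"fmt"
	"sync"
)

type flow struct {
	name        string
	executeFunc func()
}

// runFlows launches one goroutine per flow and registers each one on the wait
// group before any of them starts, so a later wg.Wait() cannot miss a flow.
func runFlows(flows []*flow, wg *sync.WaitGroup) {
	wg.Add(len(flows))
	for _, f := range flows {
		executeFunc := f.executeFunc // capture per iteration so the closure isn't overwritten
		name := f.name
		go func() {
			defer wg.Done()
			fmt.Println("running flow", name)
			executeFunc()
		}()
	}
}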
@@ -88,11 +69,6 @@ func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowconte
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
}

// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
func (m *Manager) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler flowcontext.OnPruningPointUTXOSetOverrideHandler) {
m.context.SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler)
}

// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler flowcontext.OnTransactionAddedToMempoolHandler) {
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)

@@ -2,6 +2,8 @@ package peer

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -2,8 +2,6 @@ package protocol

import (
"github.com/kaspanet/kaspad/app/protocol/flows/rejects"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"sync"
"sync/atomic"

"github.com/kaspanet/kaspad/app/appmessage"
@@ -41,13 +39,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
// After flows were registered - spawn a new thread that will wait for connection to finish initializing
// and start receiving messages
spawn("routerInitializer-runFlows", func() {
m.routersWaitGroup.Add(1)
defer m.routersWaitGroup.Done()

if atomic.LoadUint32(&m.isClosed) == 1 {
panic(errors.Errorf("tried to initialize router when the protocol manager is closed"))
}

isBanned, err := m.context.ConnectionManager().IsBanned(netConnection)
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
panic(err)
@@ -66,47 +57,29 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net

peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
sendVersionRoute, router.OutgoingRoute())

if err != nil {
// non-blocking read from channel
select {
case innerError := <-errChan:
if errors.Is(err, routerpkg.ErrRouteClosed) {
m.handleError(innerError, netConnection, router.OutgoingRoute())
} else {
log.Errorf("Peer %s sent invalid message: %s", netConnection, innerError)
m.handleError(err, netConnection, router.OutgoingRoute())
}
default:
m.handleError(err, netConnection, router.OutgoingRoute())
}
m.handleError(err, netConnection, router.OutgoingRoute())
return
}
defer m.context.RemoveFromPeers(peer)

removeHandshakeRoutes(router)

flowsWaitGroup := &sync.WaitGroup{}
err = m.runFlows(flows, peer, errChan, flowsWaitGroup)
err = m.runFlows(flows, peer, errChan)
if err != nil {
m.handleError(err, netConnection, router.OutgoingRoute())
// We call `flowsWaitGroup.Wait()` in two places instead of deferring, because
// we already defer `m.routersWaitGroup.Done()`, so we try to avoid error prone
// and confusing use of multiple dependent defers.
flowsWaitGroup.Wait()
return
}
flowsWaitGroup.Wait()
})
}
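When the handshake fails, the longer branch above performs a non-blocking read from errChan (a select with a default case) so an already-queued flow error can be reported instead of the generic handshake error. The idiom in isolation, with illustrative names:

package p2p

// preferInnerError returns the queued error from errChan if one is already
// available, and falls back to outerErr otherwise, without ever blocking.
func preferInnerError(outerErr error, errChan <-chan error) error {
	select {
	case innerErr := <-errChan:
		return innerErr
	default:
		return outerErr
	}
}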
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)

err := m.context.ConnectionManager().Ban(netConnection)
if !errors.Is(err, connmanager.ErrCannotBanPermanent) {
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
panic(err)
}

@@ -115,7 +88,7 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
panic(err)
}
}
log.Infof("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause)
log.Debugf("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause)
netConnection.Disconnect()
return
}
@@ -162,16 +135,11 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
outgoingRoute := router.OutgoingRoute()

return []*flow{
m.registerOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.SendVirtualSelectedParentInv(m.context, outgoingRoute, peer)
}),

m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdPruningPointHash, appmessage.CmdIBDBlockLocatorHighestHash,
appmessage.CmdIBDBlockLocatorHighestHashNotFound, appmessage.CmdDonePruningPointUTXOSetChunks},
appmessage.CmdDonePruningPointUTXOSetChunks},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.context, incomingRoute,
outgoingRoute, peer)
@@ -251,7 +219,7 @@ func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopp
outgoingRoute := router.OutgoingRoute()

return []*flow{
m.registerFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
m.registerFlow("HandleRelayedTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
@@ -287,24 +255,6 @@ func (m *Manager) registerFlow(name string, router *routerpkg.Router, messageTyp
panic(err)
}

return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
}

func (m *Manager) registerFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
messageTypes []appmessage.MessageCommand, isStopping *uint32,
errChan chan error, initializeFunc flowInitializeFunc) *flow {

route, err := router.AddIncomingRouteWithCapacity(capacity, messageTypes)
if err != nil {
panic(err)
}

return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
}

func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32,
errChan chan error, initializeFunc flowInitializeFunc) *flow {

return &flow{
name: name,
executeFunc: func(peer *peerpkg.Peer) {
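registerFlowWithCapacity, kept on one side of this hunk, gives high-volume flows such as HandleRelayedTransactions a larger incoming queue (10_000) than the default route. A toy sketch of the same idea using a buffered channel instead of the kaspad router; the names here are illustrative only:

package routes

import "errors"

// ErrRouteFull is returned when a bounded route cannot accept more messages.
var ErrRouteFull = errors.New("route is full")

// boundedRoute is a stand-in for an incoming route with an explicit capacity,
// e.g. 10_000 for relayed transactions as in the diff above.
type boundedRoute struct {
	messages chan interface{}
}

func newBoundedRoute(capacity int) *boundedRoute {
	return &boundedRoute{messages: make(chan interface{}, capacity)}
}

// enqueue adds a message without blocking; callers decide how to handle overflow.
func (r *boundedRoute) enqueue(msg interface{}) error {
	select {
	case r.messages <- msg:
		return nil
	default:
		return ErrRouteFull
	}
}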
@@ -12,19 +12,19 @@ type ProtocolError struct {
Cause error
}

func (e ProtocolError) Error() string {
func (e *ProtocolError) Error() string {
return e.Cause.Error()
}

// Unwrap returns the cause of ProtocolError, to be used with `errors.Unwrap()`
func (e ProtocolError) Unwrap() error {
func (e *ProtocolError) Unwrap() error {
return e.Cause
}

// Errorf formats according to a format specifier and returns the string
// as a ProtocolError.
func Errorf(shouldBan bool, format string, args ...interface{}) error {
return ProtocolError{
return &ProtocolError{
ShouldBan: shouldBan,
Cause: errors.Errorf(format, args...),
}
@@ -33,7 +33,7 @@ func Errorf(shouldBan bool, format string, args ...interface{}) error {
// New returns a ProtocolError with the supplied message.
// New also records the stack trace at the point it was called.
func New(shouldBan bool, message string) error {
return ProtocolError{
return &ProtocolError{
ShouldBan: shouldBan,
Cause: errors.New(message),
}
@@ -41,7 +41,7 @@ func New(shouldBan bool, message string) error {

// Wrap wraps the given error and returns it as a ProtocolError.
func Wrap(shouldBan bool, err error, message string) error {
return ProtocolError{
return &ProtocolError{
ShouldBan: shouldBan,
Cause: errors.Wrap(err, message),
}
@@ -49,7 +49,7 @@ func Wrap(shouldBan bool, err error, message string) error {

// Wrapf wraps the given error with the given format and returns it as a ProtocolError.
func Wrapf(shouldBan bool, err error, format string, args ...interface{}) error {
return ProtocolError{
return &ProtocolError{
ShouldBan: shouldBan,
Cause: errors.Wrapf(err, format, args...),
}
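This hunk switches ProtocolError between value and pointer receivers, which is exactly what decides the target type errors.As needs in handleError above. A self-contained illustration (a simplified copy, not the kaspad package itself):

package protoerr

import "github.com/pkg/errors"

// ProtocolError mirrors the shape in the diff: a ban flag plus a wrapped cause.
type ProtocolError struct {
	ShouldBan bool
	Cause     error
}

func (e *ProtocolError) Error() string { return e.Cause.Error() }
func (e *ProtocolError) Unwrap() error { return e.Cause }

// Errorf wraps a formatted message as a *ProtocolError.
func Errorf(shouldBan bool, format string, args ...interface{}) error {
	return &ProtocolError{ShouldBan: shouldBan, Cause: errors.Errorf(format, args...)}
}

// With pointer receivers only *ProtocolError implements error, so errors.As
// must be given a **ProtocolError target; with value receivers the target
// would be *ProtocolError instead - the same difference visible in handleError.
func shouldBan(err error) bool {
	protocolErr := &ProtocolError{}
	if errors.As(err, &protocolErr) {
		return protocolErr.ShouldBan
	}
	return false
}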
@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -69,31 +69,10 @@ func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, blockIns
return err
}

rpcBlock := appmessage.DomainBlockToRPCBlock(block)
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
if err != nil {
return err
}
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(appmessage.DomainBlockToMsgBlock(block))
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
}

// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
// resets due to pruning point change via IBD.
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyPruningPointUTXOSetOverride")
defer onEnd()

if m.context.Config.UTXOIndex {
err := m.notifyPruningPointUTXOSetOverride()
if err != nil {
return err
}
}

return nil
}

// NotifyFinalityConflict notifies the manager that there's a finality conflict in the DAG
func (m *Manager) NotifyFinalityConflict(violatingBlockHash string) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyFinalityConflict")
@@ -116,25 +95,13 @@ func (m *Manager) notifyUTXOsChanged(blockInsertionResult *externalapi.BlockInse
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyUTXOsChanged")
defer onEnd()

utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult)
utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult.VirtualSelectedParentChainChanges)
if err != nil {
return err
}
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
}

func (m *Manager) notifyPruningPointUTXOSetOverride() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyPruningPointUTXOSetOverride")
defer onEnd()

err := m.context.UTXOIndex.Reset()
if err != nil {
return err
}

return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
}

func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
defer onEnd()

@@ -35,15 +35,9 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdShutDownRequestMessage: rpchandlers.HandleShutDown,
appmessage.CmdGetHeadersRequestMessage: rpchandlers.HandleGetHeaders,
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
appmessage.CmdUnbanRequestMessage: rpchandlers.HandleUnban,
appmessage.CmdGetInfoRequestMessage: rpchandlers.HandleGetInfo,
appmessage.CmdNotifyPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleNotifyPruningPointUTXOSetOverrideRequest,
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
}

func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {

@@ -2,6 +2,8 @@ package rpccontext

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)

@@ -30,9 +30,8 @@ type NotificationListener struct {
propagateFinalityConflictResolvedNotifications bool
propagateUTXOsChangedNotifications bool
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
propagatePruningPointUTXOSetOverrideNotifications bool

propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
propagateUTXOsChangedNotificationAddresses []*UTXOsChangedNotificationAddress
}

// NewNotificationManager creates a new NotificationManager
@@ -181,23 +180,6 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
return nil
}

// NotifyPruningPointUTXOSetOverride notifies the notification manager that the UTXO index
// reset due to pruning point change via IBD.
func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
nm.RLock()
defer nm.RUnlock()

for router, listener := range nm.listeners {
if listener.propagatePruningPointUTXOSetOverrideNotifications {
err := router.OutgoingRoute().Enqueue(appmessage.NewPruningPointUTXOSetOverrideNotificationMessage())
if err != nil {
return err
}
}
}
return nil
}

func newNotificationListener() *NotificationListener {
return &NotificationListener{
propagateBlockAddedNotifications: false,
@@ -206,7 +188,6 @@ func newNotificationListener() *NotificationListener {
propagateFinalityConflictResolvedNotifications: false,
propagateUTXOsChangedNotifications: false,
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
propagatePruningPointUTXOSetOverrideNotifications: false,
}
}

@@ -235,70 +216,34 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
}

// PropagateUTXOsChangedNotifications instructs the listener to send UTXOs changed notifications
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
// to the remote listener
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses =
make(map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress, len(addresses))
}

for _, address := range addresses {
nl.propagateUTXOsChangedNotificationAddresses[address.ScriptPublicKeyString] = address
}
}

// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
return
}

for _, address := range addresses {
delete(nl.propagateUTXOsChangedNotificationAddresses, address.ScriptPublicKeyString)
}
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses = addresses
}

func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {

// As an optimization, we iterate over the smaller set (O(n)) among the two below
// and check existence over the larger set (O(1))
utxoChangesSize := len(utxoChanges.Added) + len(utxoChanges.Removed)
addressesSize := len(nl.propagateUTXOsChangedNotificationAddresses)

notification := &appmessage.UTXOsChangedNotificationMessage{}
if utxoChangesSize < addressesSize {
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
notification.Added = append(notification.Added,
ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)...)
}
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
} else {
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
for outpoint := range removedOutpoints {
notification.Removed = append(notification.Removed, &appmessage.UTXOsByAddressesEntry{
Address: listenerAddress.Address,
Outpoint: &appmessage.RPCOutpoint{
TransactionID: outpoint.TransactionID.String(),
Index: outpoint.Index,
},
})
}
}
}

return notification
}
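The optimization comment above picks the smaller of the two collections to iterate linearly and uses the map for O(1) membership checks against the larger one. The same decision in a stripped-down form, with plain string keys standing in for script-public-key strings:

package notify

// selectEntries keeps only the changed keys that some listener address cares
// about, iterating over whichever side is smaller - the optimization the
// comment in the diff describes.
func selectEntries(changed map[string][]string, watched map[string]bool) map[string][]string {
	result := make(map[string][]string)
	if len(changed) < len(watched) {
		// Fewer changes than watched addresses: walk the changes.
		for key, entries := range changed {
			if watched[key] {
				result[key] = entries
			}
		}
		return result
	}
	// Fewer watched addresses than changes: walk the addresses.
	for key := range watched {
		if entries, ok := changed[key]; ok {
			result[key] = entries
		}
	}
	return result
}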
@@ -307,15 +252,3 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
func (nl *NotificationListener) PropagateVirtualSelectedParentBlueScoreChangedNotifications() {
nl.propagateVirtualSelectedParentBlueScoreChangedNotifications = true
}

// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
// to the remote listener.
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
nl.propagatePruningPointUTXOSetOverrideNotifications = true
}

// StopPropagatingPruningPointUTXOSetOverrideNotifications instructs the listener to stop sending pruning
// point UTXO set override notifications to the remote listener.
func (nl *NotificationListener) StopPropagatingPruningPointUTXOSetOverrideNotifications() {
nl.propagatePruningPointUTXOSetOverrideNotifications = false
}

@@ -2,9 +2,6 @@ package rpccontext

import (
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/utxoindex"
@@ -24,50 +21,10 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
UTXOEntry: &appmessage.RPCUTXOEntry{
Amount: utxoEntry.Amount(),
ScriptPublicKey: &appmessage.RPCScriptPublicKey{Script: hex.EncodeToString(utxoEntry.ScriptPublicKey().Script), Version: utxoEntry.ScriptPublicKey().Version},
BlockDAAScore: utxoEntry.BlockDAAScore(),
BlockBlueScore: utxoEntry.BlockBlueScore(),
IsCoinbase: utxoEntry.IsCoinbase(),
},
})
}
return utxosByAddressesEntries
}

// convertUTXOOutpointsToUTXOsByAddressesEntries converts
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
for outpoint := range outpoints {
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
Address: address,
Outpoint: &appmessage.RPCOutpoint{
TransactionID: outpoint.TransactionID.String(),
Index: outpoint.Index,
},
})
}
return utxosByAddressesEntries
}

// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
// to UTXOsChangedNotificationAddresses
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
addressStrings []string) ([]*UTXOsChangedNotificationAddress, error) {

addresses := make([]*UTXOsChangedNotificationAddress, len(addressStrings))
for i, addressString := range addressStrings {
address, err := util.DecodeAddress(addressString, ctx.Config.ActiveNetParams.Prefix)
if err != nil {
return nil, errors.Errorf("Could not decode address '%s': %s", addressString, err)
}
scriptPublicKey, err := txscript.PayToAddrScript(address)
if err != nil {
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
}
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
addresses[i] = &UTXOsChangedNotificationAddress{
Address: addressString,
ScriptPublicKeyString: scriptPublicKeyString,
}
}
return addresses, nil
}

@@ -2,25 +2,79 @@ package rpccontext

import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"math/big"

difficultyPackage "github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"strconv"

"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"

"github.com/kaspanet/kaspad/domain/consensus/utils/estimatedsize"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"

"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util/pointers"
)

// ErrBuildBlockVerboseDataInvalidBlock indicates that a block that was given to BuildBlockVerboseData is invalid.
var ErrBuildBlockVerboseDataInvalidBlock = errors.New("ErrBuildBlockVerboseDataInvalidBlock")
// BuildBlockVerboseData builds a BlockVerboseData from the given block.
func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, includeTransactionVerboseData bool) (*appmessage.BlockVerboseData, error) {
hash := consensushashing.HeaderHash(blockHeader)

blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(hash)
if err != nil {
return nil, err
}
result := &appmessage.BlockVerboseData{
Hash: hash.String(),
Version: blockHeader.Version(),
VersionHex: fmt.Sprintf("%08x", blockHeader.Version()),
HashMerkleRoot: blockHeader.HashMerkleRoot().String(),
AcceptedIDMerkleRoot: blockHeader.AcceptedIDMerkleRoot().String(),
UTXOCommitment: blockHeader.UTXOCommitment().String(),
ParentHashes: hashes.ToStrings(blockHeader.ParentHashes()),
Nonce: blockHeader.Nonce(),
Time: blockHeader.TimeInMilliseconds(),
Bits: strconv.FormatInt(int64(blockHeader.Bits()), 16),
Difficulty: ctx.GetDifficultyRatio(blockHeader.Bits(), ctx.Config.ActiveNetParams),
BlueScore: blockInfo.BlueScore,
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
}

if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
block, err := ctx.Domain.Consensus().GetBlock(hash)
if err != nil {
return nil, err
}

txIDs := make([]string, len(block.Transactions))
for i, tx := range block.Transactions {
txIDs[i] = consensushashing.TransactionID(tx).String()
}
result.TxIDs = txIDs

if includeTransactionVerboseData {
transactionVerboseData := make([]*appmessage.TransactionVerboseData, len(block.Transactions))
for i, tx := range block.Transactions {
txID := consensushashing.TransactionID(tx).String()
data, err := ctx.BuildTransactionVerboseData(tx, txID, blockHeader, hash.String())
if err != nil {
return nil, err
}
transactionVerboseData[i] = data
}
result.TransactionVerboseData = transactionVerboseData
}
}

return result, nil
}

// GetDifficultyRatio returns the proof-of-work difficulty as a multiple of the
// minimum difficulty using the passed bits field from the header of a block.
@@ -29,7 +83,7 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
// converted back to a number. Note this is not the same as the proof of
// work limit directly because the block difficulty is encoded in a block
// with the compact form which loses precision.
target := difficultyPackage.CompactToBig(bits)
target := difficulty.CompactToBig(bits)

difficulty := new(big.Rat).SetFrac(params.PowMax, target)
diff, _ := difficulty.Float64()
@@ -40,128 +94,102 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
return diff
}
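GetDifficultyRatio, shown in the hunk above, is powMax divided by the target decoded from the compact bits field. A minimal standalone version of that arithmetic; it reuses util/difficulty.CompactToBig from the diff, while the rounding to eight decimal places is only for display and is an assumption, not necessarily what the handler does:

package diffratio

import (
	"math"
	"math/big"

	"github.com/kaspanet/kaspad/util/difficulty"
)

// difficultyRatio returns how many times harder the block's target is than
// powMax, i.e. powMax / CompactToBig(bits), as a float64.
func difficultyRatio(bits uint32, powMax *big.Int) float64 {
	target := difficulty.CompactToBig(bits)
	ratio := new(big.Rat).SetFrac(powMax, target)
	out, _ := ratio.Float64()
	// Round for display; the compact encoding already loses precision anyway.
	return math.Round(out*1e8) / 1e8
}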
// PopulateBlockWithVerboseData populates the given `block` with verbose
// data from `domainBlockHeader` and optionally from `domainBlock`
func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, domainBlockHeader externalapi.BlockHeader,
domainBlock *externalapi.DomainBlock, includeTransactionVerboseData bool) error {
// BuildTransactionVerboseData builds a TransactionVerboseData from
// the given parameters
func (ctx *Context) BuildTransactionVerboseData(tx *externalapi.DomainTransaction, txID string,
blockHeader externalapi.BlockHeader, blockHash string) (
*appmessage.TransactionVerboseData, error) {

blockHash := consensushashing.HeaderHash(domainBlockHeader)

blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(blockHash)
if err != nil {
return err
var payloadHash string
if tx.SubnetworkID != subnetworks.SubnetworkIDNative {
payloadHash = tx.PayloadHash.String()
}

if blockInfo.BlockStatus == externalapi.StatusInvalid {
return errors.Wrap(ErrBuildBlockVerboseDataInvalidBlock, "cannot build verbose data for "+
"invalid block")
txReply := &appmessage.TransactionVerboseData{
TxID: txID,
Hash: consensushashing.TransactionHash(tx).String(),
Size: estimatedsize.TransactionEstimatedSerializedSize(tx),
TransactionVerboseInputs: ctx.buildTransactionVerboseInputs(tx),
TransactionVerboseOutputs: ctx.buildTransactionVerboseOutputs(tx, nil),
Version: tx.Version,
LockTime: tx.LockTime,
SubnetworkID: tx.SubnetworkID.String(),
Gas: tx.Gas,
PayloadHash: payloadHash,
Payload: hex.EncodeToString(tx.Payload),
}

_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
if err != nil {
return err
if blockHeader != nil {
txReply.Time = uint64(blockHeader.TimeInMilliseconds())
txReply.BlockTime = uint64(blockHeader.TimeInMilliseconds())
txReply.BlockHash = blockHash
}

block.VerboseData = &appmessage.RPCBlockVerboseData{
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
}
// selectedParentHash will be nil in the genesis block
if selectedParentHash != nil {
block.VerboseData.SelectedParentHash = selectedParentHash.String()
}
return txReply, nil
}

if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return nil
}
func (ctx *Context) buildTransactionVerboseInputs(tx *externalapi.DomainTransaction) []*appmessage.TransactionVerboseInput {
inputs := make([]*appmessage.TransactionVerboseInput, len(tx.Inputs))
for i, transactionInput := range tx.Inputs {
// The disassembled string will contain [error] inline
// if the script doesn't fully parse, so ignore the
// error here.
disbuf, _ := txscript.DisasmString(constants.MaxScriptPublicKeyVersion, transactionInput.SignatureScript)

// Get the block if we didn't receive it previously
if domainBlock == nil {
domainBlock, err = ctx.Domain.Consensus().GetBlock(blockHash)
if err != nil {
return err
input := &appmessage.TransactionVerboseInput{}
input.TxID = transactionInput.PreviousOutpoint.TransactionID.String()
input.OutputIndex = transactionInput.PreviousOutpoint.Index
input.Sequence = transactionInput.Sequence
input.ScriptSig = &appmessage.ScriptSig{
Asm: disbuf,
Hex: hex.EncodeToString(transactionInput.SignatureScript),
}
inputs[i] = input
}

transactionIDs := make([]string, len(domainBlock.Transactions))
for i, transaction := range domainBlock.Transactions {
transactionIDs[i] = consensushashing.TransactionID(transaction).String()
}
block.VerboseData.TransactionIDs = transactionIDs
return inputs
}

if includeTransactionVerboseData {
for _, transaction := range block.Transactions {
err := ctx.PopulateTransactionWithVerboseData(transaction, domainBlockHeader)
if err != nil {
return err
// buildTransactionVerboseOutputs returns a slice of JSON objects for the outputs of the passed
// transaction.
func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransaction, filterAddrMap map[string]struct{}) []*appmessage.TransactionVerboseOutput {
outputs := make([]*appmessage.TransactionVerboseOutput, len(tx.Outputs))
for i, transactionOutput := range tx.Outputs {

// Ignore the error here since an error means the script
// couldn't parse and there is no additional information about
// it anyways.
scriptClass, addr, _ := txscript.ExtractScriptPubKeyAddress(
transactionOutput.ScriptPublicKey, ctx.Config.ActiveNetParams)

// Encode the addresses while checking if the address passes the
// filter when needed.
passesFilter := len(filterAddrMap) == 0
var encodedAddr string
if addr != nil {
encodedAddr = *pointers.String(addr.EncodeAddress())

// If the filter doesn't already pass, make it pass if
// the address exists in the filter.
if _, exists := filterAddrMap[encodedAddr]; exists {
passesFilter = true
}
}
}

return nil
}

// PopulateTransactionWithVerboseData populates the given `transaction` with
// verbose data from `domainTransaction`
func (ctx *Context) PopulateTransactionWithVerboseData(
transaction *appmessage.RPCTransaction, domainBlockHeader externalapi.BlockHeader) error {

domainTransaction, err := appmessage.RPCTransactionToDomainTransaction(transaction)
if err != nil {
return err
}

transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
Hash: consensushashing.TransactionHash(domainTransaction).String(),
Size: estimatedsize.TransactionEstimatedSerializedSize(domainTransaction),
}
if domainBlockHeader != nil {
transaction.VerboseData.BlockHash = consensushashing.HeaderHash(domainBlockHeader).String()
transaction.VerboseData.BlockTime = uint64(domainBlockHeader.TimeInMilliseconds())
}
for _, input := range transaction.Inputs {
ctx.populateTransactionInputWithVerboseData(input)
}
for _, output := range transaction.Outputs {
err := ctx.populateTransactionOutputWithVerboseData(output)
if err != nil {
return err
if !passesFilter {
continue
}

output := &appmessage.TransactionVerboseOutput{}
output.Index = uint32(i)
output.Value = transactionOutput.Value
output.ScriptPubKey = &appmessage.ScriptPubKeyResult{
Address: encodedAddr,
Hex: hex.EncodeToString(transactionOutput.ScriptPublicKey.Script),
Type: scriptClass.String(),
}
outputs[i] = output
}
return nil
}

func (ctx *Context) populateTransactionInputWithVerboseData(transactionInput *appmessage.RPCTransactionInput) {
transactionInput.VerboseData = &appmessage.RPCTransactionInputVerboseData{}
}

func (ctx *Context) populateTransactionOutputWithVerboseData(transactionOutput *appmessage.RPCTransactionOutput) error {
scriptPublicKey, err := hex.DecodeString(transactionOutput.ScriptPublicKey.Script)
if err != nil {
return err
}
domainScriptPublicKey := &externalapi.ScriptPublicKey{
Script: scriptPublicKey,
Version: transactionOutput.ScriptPublicKey.Version,
}

// Ignore the error here since an error means the script
// couldn't be parsed and there's no additional information about
// it anyways
scriptPublicKeyType, scriptPublicKeyAddress, _ := txscript.ExtractScriptPubKeyAddress(
domainScriptPublicKey, ctx.Config.ActiveNetParams)

var encodedScriptPublicKeyAddress string
if scriptPublicKeyAddress != nil {
encodedScriptPublicKeyAddress = scriptPublicKeyAddress.EncodeAddress()
}
transactionOutput.VerboseData = &appmessage.RPCTransactionOutputVerboseData{
ScriptPublicKeyType: scriptPublicKeyType.String(),
ScriptPublicKeyAddress: encodedScriptPublicKeyAddress,
}
return nil

return outputs
}

@@ -1,28 +0,0 @@
package rpchandlers

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"net"
)

// HandleBan handles the respectively named RPC command
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
banRequest := request.(*appmessage.BanRequestMessage)
ip := net.ParseIP(banRequest.IP)
if ip == nil {
errorMessage := &appmessage.BanResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", banRequest.IP)
return errorMessage, nil
}

err := context.ConnectionManager.BanByIP(ip)
if err != nil {
errorMessage := &appmessage.BanResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not ban IP: %s", err)
return errorMessage, nil
}
response := appmessage.NewBanResponseMessage()
return response, nil
}
@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)

// HandleGetBlock handles the respectively named RPC command
@@ -26,20 +25,14 @@ func HandleGetBlock(context *rpccontext.Context, _ *router.Router, request appme
errorMessage.Error = appmessage.RPCErrorf("Block %s not found", hash)
return errorMessage, nil
}
block := &externalapi.DomainBlock{Header: header}

response := appmessage.NewGetBlockResponseMessage()
response.Block = appmessage.DomainBlockToRPCBlock(block)

err = context.PopulateBlockWithVerboseData(response.Block, header, nil, getBlockRequest.IncludeTransactionVerboseData)
blockVerboseData, err := context.BuildBlockVerboseData(header, getBlockRequest.IncludeTransactionVerboseData)
if err != nil {
if errors.Is(err, rpccontext.ErrBuildBlockVerboseDataInvalidBlock) {
errorMessage := &appmessage.GetBlockResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Block %s is invalid", hash)
return errorMessage, nil
}
return nil, err
}
response.BlockVerboseData = blockVerboseData

return response, nil
}

@@ -36,11 +36,5 @@ func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appm
response.Difficulty = context.GetDifficultyRatio(virtualInfo.Bits, context.Config.ActiveNetParams)
response.PastMedianTime = virtualInfo.PastMedianTime

pruningPoint, err := context.Domain.Consensus().PruningPoint()
if err != nil {
return nil, err
}
response.PruningPointHash = pruningPoint.String()

return response, nil
}

@@ -31,12 +31,12 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
if err != nil {
return nil, err
}
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
msgBlock := appmessage.DomainBlockToMsgBlock(templateBlock)

isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}

return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
return appmessage.NewGetBlockTemplateResponseMessage(msgBlock, isSynced), nil
}

@@ -3,101 +3,18 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

const (
// maxBlocksInGetBlocksResponse is the max amount of blocks that are
// allowed in a GetBlocksResult.
maxBlocksInGetBlocksResponse = 1000
maxBlocksInGetBlocksResponse = 100
)

// HandleGetBlocks handles the respectively named RPC command
func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getBlocksRequest := request.(*appmessage.GetBlocksRequestMessage)

// Validate that user didn't set IncludeTransactionVerboseData without setting IncludeBlocks
if !getBlocksRequest.IncludeBlocks && getBlocksRequest.IncludeTransactionVerboseData {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf(
"If includeTransactionVerboseData is set, then includeBlockVerboseData must be set as well"),
}, nil
}

// Decode lowHash
// If lowHash is empty - use genesis instead.
lowHash := context.Config.ActiveNetParams.GenesisHash
if getBlocksRequest.LowHash != "" {
var err error
lowHash, err = externalapi.NewDomainHashFromString(getBlocksRequest.LowHash)
if err != nil {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf("Could not decode lowHash %s: %s", getBlocksRequest.LowHash, err),
}, nil
}

blockInfo, err := context.Domain.Consensus().GetBlockInfo(lowHash)
if err != nil {
return nil, err
}

if !blockInfo.Exists {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
}, nil
}
}

// Get hashes between lowHash and virtualSelectedParent
virtualSelectedParent, err := context.Domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return nil, err
}
blockHashes, highHash, err := context.Domain.Consensus().GetHashesBetween(lowHash, virtualSelectedParent, maxBlocksInGetBlocksResponse)
if err != nil {
return nil, err
}

// prepend low hash to make it inclusive
blockHashes = append([]*externalapi.DomainHash{lowHash}, blockHashes...)

// If the high hash is equal to virtualSelectedParent it means GetHashesBetween didn't skip any hashes, and
// there's space to add the virtualSelectedParent's anticone, otherwise you can't add the anticone because
// there's no guarantee that all of the anticone root ancestors will be present.
if highHash.Equal(virtualSelectedParent) {
virtualSelectedParentAnticone, err := context.Domain.Consensus().Anticone(virtualSelectedParent)
if err != nil {
return nil, err
}
blockHashes = append(blockHashes, virtualSelectedParentAnticone...)
}

// Both GetHashesBetween and Anticone might return more then the allowed number of blocks, so
// trim any extra blocks.
if len(blockHashes) > maxBlocksInGetBlocksResponse {
blockHashes = blockHashes[:maxBlocksInGetBlocksResponse]
}

// Prepare the response
response := appmessage.NewGetBlocksResponseMessage()
response.BlockHashes = hashes.ToStrings(blockHashes)
if getBlocksRequest.IncludeBlocks {
blocks := make([]*appmessage.RPCBlock, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := context.Domain.Consensus().GetBlockHeader(blockHash)
if err != nil {
return nil, err
}
block := &externalapi.DomainBlock{Header: blockHeader}
blocks[i] = appmessage.DomainBlockToRPCBlock(block)
err = context.PopulateBlockWithVerboseData(blocks[i], blockHeader, nil, getBlocksRequest.IncludeTransactionVerboseData)
if err != nil {
return nil, err
}
}
}

response := &appmessage.GetBlocksResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}
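The comments in HandleGetBlocks above describe three steps: prepend lowHash so the range is inclusive, append the virtual selected parent's anticone only when GetHashesBetween didn't truncate, then trim to the response limit. A compact sketch of just that assembly step, with the consensus calls replaced by plain slice parameters and string hashes for brevity:

package getblocks

// assembleBlockHashes mirrors the flow in the diff: prepend lowHash, optionally
// append the anticone when the walk reached the virtual selected parent, then
// trim to maxBlocks.
func assembleBlockHashes(lowHash, virtualSelectedParent, highHash string,
	between, anticone []string, maxBlocks int) []string {

	// Prepend lowHash so the range is inclusive on the low side.
	hashes := append([]string{lowHash}, between...)

	// Only add the anticone when nothing was skipped; otherwise some anticone
	// ancestors might be missing from the response.
	if highHash == virtualSelectedParent {
		hashes = append(hashes, anticone...)
	}

	// Both sources may overshoot the limit, so trim the tail.
	if len(hashes) > maxBlocks {
		hashes = hashes[:maxBlocks]
	}
	return hashes
}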
@@ -1,147 +0,0 @@
|
||||
package rpchandlers_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpchandlers"
|
||||
"github.com/kaspanet/kaspad/domain/consensus"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
)
|
||||
|
||||
type fakeDomain struct {
|
||||
testapi.TestConsensus
|
||||
}
|
||||
|
||||
func (d fakeDomain) Consensus() externalapi.Consensus { return d }
|
||||
func (d fakeDomain) MiningManager() miningmanager.MiningManager { return nil }
|
||||
|
||||
func TestHandleGetBlocks(t *testing.T) {
|
||||
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
|
||||
stagingArea := model.NewStagingArea()
|
||||
|
||||
factory := consensus.NewFactory()
|
||||
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleGetBlocks")
		if err != nil {
			t.Fatalf("Error setting up consensus: %+v", err)
		}
		defer teardown(false)

		fakeContext := rpccontext.Context{
			Config: &config.Config{Flags: &config.Flags{NetworkFlags: config.NetworkFlags{ActiveNetParams: &consensusConfig.Params}}},
			Domain: fakeDomain{tc},
		}

		getBlocks := func(lowHash *externalapi.DomainHash) *appmessage.GetBlocksResponseMessage {
			request := appmessage.GetBlocksRequestMessage{}
			if lowHash != nil {
				request.LowHash = lowHash.String()
			}
			response, err := rpchandlers.HandleGetBlocks(&fakeContext, nil, &request)
			if err != nil {
				t.Fatalf("Expected empty request to not fail, instead: '%v'", err)
			}
			return response.(*appmessage.GetBlocksResponseMessage)
		}

		filterAntiPast := func(povBlock *externalapi.DomainHash, slice []*externalapi.DomainHash) []*externalapi.DomainHash {
			antipast := make([]*externalapi.DomainHash, 0, len(slice))

			for _, blockHash := range slice {
				isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, povBlock)
				if err != nil {
					t.Fatalf("Failed doing reachability check: '%v'", err)
				}
				if !isInPastOfPovBlock {
					antipast = append(antipast, blockHash)
				}
			}
			return antipast
		}

		// Create a DAG with the following structure:
		//         merging block
		//       /      |      \
		//  split1   split2   split3
		//       \      |      /
		//         merging block
		//       /      |      \
		//  split1   split2   split3
		//       \      |      /
		//             etc.
		expectedOrder := make([]*externalapi.DomainHash, 0, 40)
		mergingBlock := consensusConfig.GenesisHash
		for i := 0; i < 10; i++ {
			splitBlocks := make([]*externalapi.DomainHash, 0, 3)
			for j := 0; j < 3; j++ {
				blockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{mergingBlock}, nil, nil)
				if err != nil {
					t.Fatalf("Failed adding block: %v", err)
				}
				splitBlocks = append(splitBlocks, blockHash)
			}
			sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(stagingArea, splitBlocks, tc, t)))
			restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1]
			expectedOrder = append(expectedOrder, selectedParent)
			expectedOrder = append(expectedOrder, restOfSplitBlocks...)

			mergingBlock, _, err = tc.AddBlock(splitBlocks, nil, nil)
			if err != nil {
				t.Fatalf("Failed adding block: %v", err)
			}
			expectedOrder = append(expectedOrder, mergingBlock)
		}

		virtualSelectedParent, err := tc.GetVirtualSelectedParent()
		if err != nil {
			t.Fatalf("Failed getting SelectedParent: %v", err)
		}
		if !virtualSelectedParent.Equal(expectedOrder[len(expectedOrder)-1]) {
			t.Fatalf("Expected %s to be selectedParent, instead found: %s", expectedOrder[len(expectedOrder)-1], virtualSelectedParent)
		}

		requestSelectedParent := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(requestSelectedParent.BlockHashes, hashes.ToStrings([]*externalapi.DomainHash{virtualSelectedParent})) {
			t.Fatalf("TestHandleGetBlocks expected:\n%v\nactual:\n%v", virtualSelectedParent, requestSelectedParent.BlockHashes)
		}

		for i, blockHash := range expectedOrder {
			expectedBlocks := filterAntiPast(blockHash, expectedOrder)
			expectedBlocks = append([]*externalapi.DomainHash{blockHash}, expectedBlocks...)

			actualBlocks := getBlocks(blockHash)
			if !reflect.DeepEqual(actualBlocks.BlockHashes, hashes.ToStrings(expectedBlocks)) {
				t.Fatalf("TestHandleGetBlocks %d \nexpected: \n%v\nactual:\n%v", i,
					hashes.ToStrings(expectedBlocks), actualBlocks.BlockHashes)
			}
		}

		// Make explicitly sure that if lowHash==highHash we get a slice with a single hash.
		actualBlocks := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(actualBlocks.BlockHashes, []string{virtualSelectedParent.String()}) {
			t.Fatalf("TestHandleGetBlocks expected blocks to contain just '%s', instead got: \n%v",
				virtualSelectedParent, actualBlocks.BlockHashes)
		}

		expectedOrder = append([]*externalapi.DomainHash{consensusConfig.GenesisHash}, expectedOrder...)
		actualOrder := getBlocks(nil)
		if !reflect.DeepEqual(actualOrder.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: %v \nactual:\n%v", expectedOrder, actualOrder.BlockHashes)
		}

		requestAllExplictly := getBlocks(consensusConfig.GenesisHash)
		if !reflect.DeepEqual(requestAllExplictly.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: \n%v\n. actual:\n%v", expectedOrder, requestAllExplictly.BlockHashes)
		}
	})
}
@@ -1,17 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	response := appmessage.NewGetInfoResponseMessage(
		context.NetAdapter.ID().String(),
		uint64(context.Domain.MiningManager().TransactionCount()),
	)

	return response, nil
}
@@ -3,22 +3,25 @@ package rpchandlers
import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {

	transactions := context.Domain.MiningManager().AllTransactions()
	entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
	for _, transaction := range transactions {
		rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
		err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
	for _, tx := range transactions {
		transactionVerboseData, err := context.BuildTransactionVerboseData(
			tx, consensushashing.TransactionID(tx).String(), nil, "")
		if err != nil {
			return nil, err
		}

		entries = append(entries, &appmessage.MempoolEntry{
			Fee: transaction.Fee,
			Transaction: rpcTransaction,
			Fee: tx.Fee,
			TransactionVerboseData: transactionVerboseData,
		})
	}

@@ -24,11 +24,12 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
		errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
		return errorMessage, nil
	}
	rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
	err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)

	transactionVerboseData, err := context.BuildTransactionVerboseData(
		transaction, getMempoolEntryRequest.TxID, nil, "")
	if err != nil {
		return nil, err
	}

	return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
	return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, transactionVerboseData), nil
}
@@ -5,5 +5,5 @@ import (
	"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,19 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyPruningPointUTXOSetOverrideRequest handles the respectively named RPC command
func HandleNotifyPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
	listener.PropagatePruningPointUTXOSetOverrideNotifications()

	response := appmessage.NewNotifyPruningPointUTXOSetOverrideResponseMessage()
	return response, nil
}
@@ -3,7 +3,10 @@ package rpchandlers
import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
	"github.com/kaspanet/kaspad/domain/utxoindex"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/kaspanet/kaspad/util"
)

// HandleNotifyUTXOsChanged handles the respectively named RPC command
@@ -15,11 +18,26 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
	}

	notifyUTXOsChangedRequest := request.(*appmessage.NotifyUTXOsChangedRequestMessage)
	addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(notifyUTXOsChangedRequest.Addresses)
	if err != nil {
		errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
		errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err)
		return errorMessage, nil

	addresses := make([]*rpccontext.UTXOsChangedNotificationAddress, len(notifyUTXOsChangedRequest.Addresses))
	for i, addressString := range notifyUTXOsChangedRequest.Addresses {
		address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
		if err != nil {
			errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
			errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
			return errorMessage, nil
		}
		scriptPublicKey, err := txscript.PayToAddrScript(address)
		if err != nil {
			errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
			errorMessage.Error = appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
			return errorMessage, nil
		}
		scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
		addresses[i] = &rpccontext.UTXOsChangedNotificationAddress{
			Address: addressString,
			ScriptPublicKeyString: scriptPublicKeyString,
		}
	}

	listener, err := context.NotificationManager.Listener(router)
@@ -1,19 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleStopNotifyingPruningPointUTXOSetOverrideRequest handles the respectively named RPC command
func HandleStopNotifyingPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
	listener.StopPropagatingPruningPointUTXOSetOverrideNotifications()

	response := appmessage.NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage()
	return response, nil
}
@@ -1,33 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleStopNotifyingUTXOsChanged handles the respectively named RPC command
func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error) {
	if !context.Config.UTXOIndex {
		errorMessage := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
		errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
		return errorMessage, nil
	}

	stopNotifyingUTXOsChangedRequest := request.(*appmessage.StopNotifyingUTXOsChangedRequestMessage)
	addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(stopNotifyingUTXOsChangedRequest.Addresses)
	if err != nil {
		errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
		errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err)
		return errorMessage, nil
	}

	listener, err := context.NotificationManager.Listener(router)
	if err != nil {
		return nil, err
	}
	listener.StopPropagatingUTXOsChangedNotifications(addresses)

	response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
	return response, nil
}
@@ -2,7 +2,6 @@ package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -14,6 +13,9 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)

	msgBlock := submitBlockRequest.Block
	domainBlock := appmessage.MsgBlockToDomainBlock(msgBlock)

	if context.ProtocolManager.IsIBDRunning() {
		return &appmessage.SubmitBlockResponseMessage{
			Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
@@ -21,21 +23,11 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
		}, nil
	}

	domainBlock, err := appmessage.RPCBlockToDomainBlock(submitBlockRequest.Block)
	err := context.ProtocolManager.AddBlock(domainBlock)
	if err != nil {
		return &appmessage.SubmitBlockResponseMessage{
			Error: appmessage.RPCErrorf("Could not parse block: %s", err),
			RejectReason: appmessage.RejectReasonBlockInvalid,
		}, nil
	}

	err = context.ProtocolManager.AddBlock(domainBlock)
	if err != nil {
		isProtocolOrRuleError := errors.As(err, &ruleerrors.RuleError{}) || errors.As(err, &protocolerrors.ProtocolError{})
		if !isProtocolOrRuleError {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return nil, err
		}

		return &appmessage.SubmitBlockResponseMessage{
			Error: appmessage.RPCErrorf("Block rejected. Reason: %s", err),
			RejectReason: appmessage.RejectReasonBlockInvalid,
@@ -1,27 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"net"
)

// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	unbanRequest := request.(*appmessage.UnbanRequestMessage)
	ip := net.ParseIP(unbanRequest.IP)
	if ip == nil {
		errorMessage := &appmessage.UnbanResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", unbanRequest.IP)
		return errorMessage, nil
	}
	err := context.AddressManager.Unban(appmessage.NewNetAddressIPPort(ip, 0))
	if err != nil {
		errorMessage := &appmessage.UnbanResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not unban IP: %s", err)
		return errorMessage, nil
	}
	response := appmessage.NewUnbanResponseMessage()
	return response, nil
}
@@ -1,85 +0,0 @@
Kaspad v0.10.1 - 2021-05-11
===========================
* Calculate virtual's acceptance data and multiset after importing a new pruning point (#1700)

Kaspad v0.10.0 - 2021-04-26
===========================
Major changes include:
* Implementing a signature hashing scheme similar to BIP-143
* Replacing HASH160 with BLAKE2B
* Replacing ECMH with MuHash
* Removing RIPEMD160 and SHA1 from the codebase entirely
* Making P2PKH transactions non-standard
* Vastly enhancing the CLI wallet
* Restructuring kaspad's app/home directory
* Modifying block and transaction types in the RPC to be easier to consume clientside

A partial list of the more-important commits is as follows:
* Fix data race in GetBlockChildren (#1579)
* Remove payload hash (#1583)
* Add the mempool size to getInfo RPC command (#1584)
* Change the difficulty to be calculated based on the same block instead of its selected parent (#1591)
* Adjust the difficulty in the first difficultyAdjustmentWindowSize blocks (#1592)
* Adding DAA score (#1596)
* Use DAA score where needed (#1602)
* Remove the Services field from NetAddress. (#1610)
* Fix getBlocks to not add the anticone when some blocks were filtered by GetHashesBetween (#1611)
* Restructure the default ~/.kaspad directory layout (#1613)
* Replace the HomeDir flag with a AppDir flag (#1615)
* Implement BIP-143-like sighash (#1598)
* Change --datadir to --appdir and remove symmetrical connection in stability tests (#1617)
* Use BLAKE2B instead of HASH160, and get rid of any usage of RIPEMD160 and SHA1 (#1618)
* Replace ECMH with Muhash (#1624)
* Add support for multiple staging areas (#1633)
* Make sure the ghostdagDataStore cache is at least DifficultyAdjustmentBlockWindow sized (#1635)
* Resolve each block status in it's own staging area (#1634)
* Add mass limit to mempool (#1627)
* In RPC, use RPCTransactions and RPCBlocks instead of TransactionMessages and BlockMessages (#1609)
* Use go-secp256k1 v0.0.5 (#1640)
* Add a show-address subcommand to kaspawallet (#1653)
* Replace p2pkh with p2pk (#1650)
* Implement importing private keys into the wallet (#1655)
* Add dump unencrypted data sub command to the wallet (#1661)
* Add ECDSA support (#1657)
* Add OpCheckMultiSigECDSA (#1663)
* Add ECDSA support to the wallet (#1664)
* Make moving the pruning point faster (#1660)
* Implement new mechanism for updating UTXO Diffs (#1671)

Kaspad v0.9.2 - 2021-03-31
===========================
* Increase the route capacity of InvTransaction messages. (#1603) (#1637)

Kaspad v0.9.1 - 2021-03-14
===========================
* Testnet network reset

Kaspad v0.9.0 - 2021-03-04
===========================

* Merge big subdags in pick virtual parents (#1574)
* Write in the reject message the tx rejection reason (#1573)
* Add nil checks for protowire (#1570)
* Increase getBlocks limit to 1000 (#1572)
* Return RPC error if getBlock's lowHash doesn't exist (#1569)
* Add default dns-seeder to testnet (#1568)
* Fix utxoindex deserialization (#1566)
* Add pruning point hash to GetBlockDagInfo response (#1565)
* Use EmitUnpopulated so that kaspactl prints all fields, even the default ones (#1561)
* Stop logging an error whenever an RPC/P2P connection is canceled (#1562)
* Cleanup the logger and make it asynchronous (#1524)
* Close all iterators (#1542)
* Add childrenHashes to GetBlock/s RPC commands (#1560)
* Add ScriptPublicKey.Version to RPC (#1559)
* Fix the target block rate to create less bursty mining (#1554)

Kaspad v0.8.10 - 2021-02-25
===========================

* Fix bug where invalid mempool transactions were not removed (#1551)
* Add RPC reconnection to the miner (#1552)
* Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
* Fix UTXO index (#1548)
* Prevent fast failing (#1545)
* Increase the sleep time in kaspaminer when the node is not synced (#1544)
* Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
* Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)
@@ -1,9 +0,0 @@
genkeypair
==========

A tool for generating private-key-address pairs.

Note: This tool prints unencrypted private keys and is not recommended for day
to day use, and is intended mainly for tests.

In order to manage your funds it's recommended to use [kaspawallet](../kaspawallet)
@@ -1,26 +0,0 @@
package main

import (
	"github.com/jessevdk/go-flags"
	"github.com/kaspanet/kaspad/infrastructure/config"
)

type configFlags struct {
	config.NetworkFlags
}

func parseConfig() (*configFlags, error) {
	cfg := &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return nil, err
	}

	err = cfg.ResolveNetwork(parser)
	if err != nil {
		return nil, err
	}

	return cfg, nil
}
@@ -1,27 +0,0 @@
package main

import (
	"fmt"
	"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
	"github.com/kaspanet/kaspad/util"
)

func main() {
	cfg, err := parseConfig()
	if err != nil {
		panic(err)
	}

	privateKey, publicKey, err := libkaspawallet.CreateKeyPair(false)
	if err != nil {
		panic(err)
	}

	addr, err := util.NewAddressPublicKey(publicKey, cfg.NetParams().Prefix)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Private key: %x\n", privateKey)
	fmt.Printf("Address: %s\n", addr)
}
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -13,7 +13,6 @@ var commandTypes = []reflect.Type{
	reflect.TypeOf(protowire.KaspadMessage_GetConnectedPeerInfoRequest{}),
	reflect.TypeOf(protowire.KaspadMessage_GetPeerAddressesRequest{}),
	reflect.TypeOf(protowire.KaspadMessage_GetCurrentNetworkRequest{}),
	reflect.TypeOf(protowire.KaspadMessage_GetInfoRequest{}),

	reflect.TypeOf(protowire.KaspadMessage_GetBlockRequest{}),
	reflect.TypeOf(protowire.KaspadMessage_GetBlocksRequest{}),
@@ -33,9 +32,6 @@ var commandTypes = []reflect.Type{
	reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),

	reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),

	reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
	reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),
}

type commandDescription struct {
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

@@ -2,11 +2,10 @@ package main

import (
	"fmt"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
	"os"
	"time"

	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"

	"github.com/pkg/errors"
	"google.golang.org/protobuf/encoding/protojson"

@@ -68,7 +67,7 @@ func postCommand(cfg *configFlags, client *grpcclient.GRPCClient, responseChan c
	if err != nil {
		printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
	}
	responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response)
	responseBytes, err := protojson.Marshal(response)
	if err != nil {
		printErrorAndExit(errors.Wrapf(err, "error parsing the response from the RPC server").Error())
	}
@@ -93,7 +92,6 @@ func prettifyResponse(response string) string {

	marshalOptions := &protojson.MarshalOptions{}
	marshalOptions.Indent = " "
	marshalOptions.EmitUnpopulated = true
	return marshalOptions.Format(kaspadMessage)
}

@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -13,47 +13,34 @@ const minerTimeout = 10 * time.Second
type minerClient struct {
	*rpcclient.RPCClient

	cfg *configFlags
	blockAddedNotificationChan chan struct{}
}

func (mc *minerClient) connect() error {
	rpcAddress, err := mc.cfg.NetParams().NormalizeRPCServerAddress(mc.cfg.RPCServer)
func newMinerClient(cfg *configFlags) (*minerClient, error) {
	rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
	if err != nil {
		return err
		return nil, err
	}
	rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
	if err != nil {
		return err
		return nil, err
	}
	mc.RPCClient = rpcClient
	mc.SetTimeout(minerTimeout)
	mc.SetLogger(backendLog, logger.LevelTrace)
	rpcClient.SetTimeout(minerTimeout)
	rpcClient.SetLogger(backendLog, logger.LevelTrace)

	err = mc.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
	minerClient := &minerClient{
		RPCClient: rpcClient,
		blockAddedNotificationChan: make(chan struct{}),
	}

	err = rpcClient.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
		select {
		case mc.blockAddedNotificationChan <- struct{}{}:
		case minerClient.blockAddedNotificationChan <- struct{}{}:
		default:
		}
	})
	if err != nil {
		return errors.Wrapf(err, "error requesting block-added notifications")
	}

	log.Infof("Connected to %s", rpcAddress)

	return nil
}

func newMinerClient(cfg *configFlags) (*minerClient, error) {
	minerClient := &minerClient{
		cfg: cfg,
		blockAddedNotificationChan: make(chan struct{}),
	}

	err := minerClient.connect()
	if err != nil {
		return nil, err
		return nil, errors.Wrapf(err, "error requesting block-added notifications")
	}

	return minerClient, nil

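A side note on the select/default inside the block-added callback above: it is a non-blocking send, so if the mine loop has not yet consumed the previous notification, the new one is dropped rather than stalling the RPC client's callback. A minimal, self-contained sketch of that pattern in Go (the names and the WaitGroup choreography are illustrative, not taken from this diff):

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Unbuffered, like blockAddedNotificationChan in the miner client.
	blockAdded := make(chan struct{})

	// Non-blocking send: deliver only if a receiver is already waiting,
	// otherwise drop the notification so the sender never blocks.
	notify := func() {
		select {
		case blockAdded <- struct{}{}:
		default:
		}
	}

	// No receiver yet: both calls return immediately and nothing is queued.
	notify()
	notify()

	// With a receiver blocked on the channel, a plain send is guaranteed to arrive.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-blockAdded
		fmt.Println("block-added notification received")
	}()
	blockAdded <- struct{}{}
	wg.Wait()
}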
@@ -17,27 +17,26 @@ import (
)

const (
	defaultLogFilename = "kaspaminer.log"
	defaultErrLogFilename = "kaspaminer_err.log"
	defaultTargetBlockRateRatio = 2.0
	defaultLogFilename = "kaspaminer.log"
	defaultErrLogFilename = "kaspaminer_err.log"
)

var (
	// Default configuration options
	defaultAppDir = util.AppDir("kaspaminer", false)
	defaultLogFile = filepath.Join(defaultAppDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultAppDir, defaultErrLogFilename)
	defaultHomeDir = util.AppDataDir("kaspaminer", false)
	defaultLogFile = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
	defaultRPCServer = "localhost"
)

type configFlags struct {
	ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
	RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	MiningAddr string `long:"miningaddr" description:"Address to mine to"`
	NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
	MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
	Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 0 means no limit (The default one is 2 * target network block rate)"`
	ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
	RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	MiningAddr string `long:"miningaddr" description:"Address to mine to"`
	NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
	MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
	Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
	config.NetworkFlags
}

@@ -65,11 +64,6 @@ func parseConfig() (*configFlags, error) {
		return nil, err
	}

	if cfg.TargetBlocksPerSecond == nil {
		targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds()
		cfg.TargetBlocksPerSecond = &targetBlocksPerSecond
	}

	if cfg.Profile != "" {
		profilePort, err := strconv.Atoi(cfg.Profile)
		if err != nil || profilePort < 1024 || profilePort > 65535 {
@@ -77,10 +71,6 @@ func parseConfig() (*configFlags, error) {
		}
	}

	if cfg.MiningAddr == "" {
		return nil, errors.New("--miningaddr is required")
	}

	initLog(defaultLogFile, defaultErrLogFile)

	return cfg, nil

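As a worked note on the default that the hunk above removes: with defaultTargetBlockRateRatio = 2.0, the miner caps itself at twice the network's nominal block rate. A minimal sketch of that arithmetic (the one-second TargetTimePerBlock is an illustrative assumption; kaspaminer reads the real value from cfg.NetParams()):

package main

import (
	"fmt"
	"time"
)

func main() {
	const defaultTargetBlockRateRatio = 2.0 // same constant the removed code used

	// Illustrative network parameter; the real value comes from the active net params.
	targetTimePerBlock := 1 * time.Second

	// Mirrors the removed default: ratio divided by seconds-per-block
	// gives the maximum blocks-per-second the miner will aim for.
	targetBlocksPerSecond := defaultTargetBlockRateRatio / targetTimePerBlock.Seconds()

	fmt.Printf("default cap: %.1f blocks per second\n", targetBlocksPerSecond) // 2.0
}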
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

@@ -14,7 +14,6 @@ var (
)

func initLog(logFile, errLogFile string) {
	log.SetLevel(logger.LevelDebug)
	err := backendLog.AddLogFile(logFile, logger.LevelTrace)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err)
@@ -25,15 +24,4 @@ func initLog(logFile, errLogFile string) {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err)
		os.Exit(1)
	}
	err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err)
		os.Exit(1)
	}
	err = backendLog.Run()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
		os.Exit(1)
	}

}

@@ -26,7 +26,6 @@ func main() {
		fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
		os.Exit(1)
	}
	defer backendLog.Close()

	// Show version at startup.
	log.Infof("Version %s", version.Version())
@@ -49,7 +48,7 @@ func main() {

	doneChan := make(chan struct{})
	spawn("mineLoop", func() {
		err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
		err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
		if err != nil {
			panic(errors.Wrap(err, "error in mine loop"))
		}

Some files were not shown because too many files have changed in this diff.