Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 19:22:53 +00:00

Compare commits: v0.9.2...optimize-u (7 commits)
| Author | SHA1 | Date |
|---|---|---|
| | d5f2495522 | |
| | 8588774837 | |
| | 94c3f4b80c | |
| | a5a84e9215 | |
| | 1cec4c91cf | |
| | 3c0b74208a | |
| | 08d983b84a | |
.github/workflows/go-deploy.yml (vendored, 77 lines)
@@ -1,77 +0,0 @@
name: Build and Upload assets
on:
  release:
    types: [published]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, windows-latest, macos-latest ]
    name: Building For ${{ matrix.os }}
    steps:
      - name: Fix windows CRLF
        run: git config --global core.autocrlf false

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      # We need to increase the page size because the tests run out of memory on github CI windows.
      # Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
      # MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
      - name: Increase page size on windows
        if: runner.os == 'Windows'
        shell: powershell
        run: powershell -command .\.github\workflows\SetPageFileSize.ps1


      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16

      - name: Build on linux
        if: runner.os == 'Linux'
        # `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
        # `-s -w` strips the binary to produce smaller size binaries
        run: |
          go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV

      - name: Build on Windows
        if: runner.os == 'Windows'
        shell: bash
        run: |
          go build -v -ldflags="-s -w" -o bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
          powershell "Compress-Archive bin/* \"${archive}\""
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV

      - name: Build on MacOS
        if: runner.os == 'macOS'
        run: |
          go build -v -ldflags="-s -w" -o ./bin/ ./...
          archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
          asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
          zip -r "${archive}" ./bin/*
          echo "archive=${archive}" >> $GITHUB_ENV
          echo "asset_name=${asset_name}" >> $GITHUB_ENV


      - name: Upload Release Asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }}
          asset_path: "./${{ env.archive }}"
          asset_name: "${{ env.asset_name }}"
          asset_content_type: application/zip
.github/workflows/go-race.yml (vendored, 49 lines)
@@ -1,49 +0,0 @@
name: Go-Race

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:

jobs:
  race_test:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        branch: [ master, latest ]
    name: Race detection on ${{ matrix.branch }}
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.15

      - name: Set scheduled branch name
        shell: bash
        if: github.event_name == 'schedule'
        run: |
          if [ "${{ matrix.branch }}" == "master" ]; then
            echo "run_on=master" >> $GITHUB_ENV
          fi
          if [ "${{ matrix.branch }}" == "latest" ]; then
            branch=$(git branch -r | grep 'v\([0-9]\+\.\)\([0-9]\+\.\)\([0-9]\+\)-dev' | sort -Vr | head -1 | xargs)
            echo "run_on=${branch}" >> $GITHUB_ENV
          fi

      - name: Set manual branch name
        shell: bash
        if: github.event_name == 'workflow_dispatch'
        run: echo "run_on=${{ github.ref }}" >> $GITHUB_ENV

      - name: Test with race detector
        shell: bash
        run: |
          git checkout "${{ env.run_on }}"
          git status
          go test -race ./...
.github/workflows/go.yml (vendored, 10 lines)
@@ -11,7 +11,6 @@ jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-16.04, macos-10.15 ]
    name: Testing on on ${{ matrix.os }}
@@ -35,7 +34,7 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
          go-version: 1.14


      # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -49,7 +48,7 @@ jobs:

      - name: Test
        shell: bash
        run: ./build_and_test.sh -v
        run: ./build_and_test.sh

  coverage:
    runs-on: ubuntu-20.04
@@ -61,10 +60,11 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
          go-version: 1.14

      - name: Create coverage file
        run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
        # Because of https://github.com/golang/go/issues/27333 this seem to "fail" even though nothing is wrong, so ignore the failure
        run: go test -json -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./... || true

      - name: Upload coverage file
        run: bash <(curl -s https://codecov.io/bash)
README.md
@@ -18,7 +18,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -65,7 +65,7 @@ is used for this project.

## Documentation

The [documentation](https://github.com/kaspanet/docs) is a work-in-progress
The documentation is a work-in-progress.

## License

app/app.go (19 lines)
@@ -7,20 +7,20 @@ import (
    "runtime"
    "time"

    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/db/database"
    "github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
    "github.com/kaspanet/kaspad/infrastructure/logger"
    "github.com/kaspanet/kaspad/infrastructure/os/execenv"
    "github.com/kaspanet/kaspad/infrastructure/os/limits"

    "github.com/kaspanet/kaspad/infrastructure/os/signal"
    "github.com/kaspanet/kaspad/infrastructure/os/winservice"
    "github.com/kaspanet/kaspad/util/panics"
    "github.com/kaspanet/kaspad/util/profiling"
    "github.com/kaspanet/kaspad/version"
)

const leveldbCacheSizeMiB = 256
    "github.com/kaspanet/kaspad/util/panics"

    "github.com/kaspanet/kaspad/infrastructure/config"
    "github.com/kaspanet/kaspad/infrastructure/os/execenv"
    "github.com/kaspanet/kaspad/infrastructure/os/limits"
    "github.com/kaspanet/kaspad/infrastructure/os/winservice"
)

var desiredLimits = &limits.DesiredLimits{
    FileLimitWant: 2048,
@@ -49,7 +49,6 @@ func StartApp() error {
        fmt.Fprintln(os.Stderr, err)
        return err
    }
    defer logger.BackendLog.Close()
    defer panics.HandlePanic(log, "MAIN", nil)

    app := &kaspadApp{cfg: cfg}
@@ -182,5 +181,5 @@ func removeDatabase(cfg *config.Config) error {
func openDB(cfg *config.Config) (database.Database, error) {
    dbPath := databasePath(cfg)
    log.Infof("Loading database from '%s'", dbPath)
    return ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
    return ldb.NewLevelDB(dbPath)
}
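For orientation, the only functional difference between the two openDB lines in the hunk above is whether an explicit LevelDB cache size is forwarded. A minimal consolidation of the cache-size variant, stitched together from the const and function lines already quoted in these hunks (an illustrative sketch, not a complete file), would read:

    const leveldbCacheSizeMiB = 256 // from the import hunk above

    // openDB opens the node database at the configured path and forwards the
    // cache size; the other side of the hunk calls ldb.NewLevelDB(dbPath)
    // with no cache argument.
    func openDB(cfg *config.Config) (database.Database, error) {
        dbPath := databasePath(cfg)
        log.Infof("Loading database from '%s'", dbPath)
        return ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
    }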
@@ -164,7 +164,11 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
    inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
    for i, input := range rpcTransaction.Inputs {
        transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
        transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
        if err != nil {
            return nil, err
        }
        transactionID, err := transactionid.FromBytes(transactionIDBytes)
        if err != nil {
            return nil, err
        }
@@ -194,11 +198,19 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
        }
    }

    subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID)
    subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
    if err != nil {
        return nil, err
    }
    payloadHash, err := externalapi.NewDomainHashFromString(rpcTransaction.PayloadHash)
    subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
    if err != nil {
        return nil, err
    }
    payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
    if err != nil {
        return nil, err
    }
    payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
    if err != nil {
        return nil, err
    }
@@ -243,7 +255,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
            ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
        }
    }
    subnetworkID := transaction.SubnetworkID.String()
    subnetworkID := hex.EncodeToString(transaction.SubnetworkID[:])
    payloadHash := transaction.PayloadHash.String()
    payload := hex.EncodeToString(transaction.Payload)
    return &RPCTransaction{
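Taken together, the hex-based side of the hunks above converts every RPC hex string into its domain type with the same two-step pattern: hex-decode the string, then build the typed value from the raw bytes. A consolidated sketch of that path, using only the calls quoted in the hunks and assuming the surrounding RPCTransactionToDomainTransaction function and the repository's transactionid, subnetworks and externalapi packages:

    // Transaction ID of the previous outpoint.
    transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
    if err != nil {
        return nil, err
    }
    transactionID, err := transactionid.FromBytes(transactionIDBytes)
    if err != nil {
        return nil, err
    }

    // Subnetwork ID.
    subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
    if err != nil {
        return nil, err
    }
    subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
    if err != nil {
        return nil, err
    }

    // Payload hash.
    payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
    if err != nil {
        return nil, err
    }
    payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
    if err != nil {
        return nil, err
    }

The string-based side of the same hunks collapses each of these pairs into a single call (transactionid.FromString, subnetworks.FromString, externalapi.NewDomainHashFromString).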
@@ -59,7 +59,6 @@ const (
|
||||
CmdPruningPointHash
|
||||
CmdIBDBlockLocator
|
||||
CmdIBDBlockLocatorHighestHash
|
||||
CmdIBDBlockLocatorHighestHashNotFound
|
||||
CmdBlockHeaders
|
||||
CmdRequestNextPruningPointUTXOSetChunk
|
||||
CmdDonePruningPointUTXOSetChunks
|
||||
@@ -117,8 +116,6 @@ const (
|
||||
CmdNotifyUTXOsChangedRequestMessage
|
||||
CmdNotifyUTXOsChangedResponseMessage
|
||||
CmdUTXOsChangedNotificationMessage
|
||||
CmdStopNotifyingUTXOsChangedRequestMessage
|
||||
CmdStopNotifyingUTXOsChangedResponseMessage
|
||||
CmdGetUTXOsByAddressesRequestMessage
|
||||
CmdGetUTXOsByAddressesResponseMessage
|
||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage
|
||||
@@ -126,17 +123,6 @@ const (
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
|
||||
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
|
||||
CmdBanRequestMessage
|
||||
CmdBanResponseMessage
|
||||
CmdUnbanRequestMessage
|
||||
CmdUnbanResponseMessage
|
||||
CmdGetInfoRequestMessage
|
||||
CmdGetInfoResponseMessage
|
||||
CmdNotifyPruningPointUTXOSetOverrideRequestMessage
|
||||
CmdNotifyPruningPointUTXOSetOverrideResponseMessage
|
||||
CmdPruningPointUTXOSetOverrideNotificationMessage
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
|
||||
)
|
||||
|
||||
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
||||
@@ -170,7 +156,6 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
||||
CmdPruningPointHash: "PruningPointHash",
|
||||
CmdIBDBlockLocator: "IBDBlockLocator",
|
||||
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
|
||||
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
|
||||
CmdBlockHeaders: "BlockHeaders",
|
||||
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
|
||||
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
|
||||
@@ -228,8 +213,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
||||
CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest",
|
||||
CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse",
|
||||
CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification",
|
||||
CmdStopNotifyingUTXOsChangedRequestMessage: "StopNotifyingUTXOsChangedRequest",
|
||||
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
|
||||
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
|
||||
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
|
||||
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
|
||||
@@ -237,17 +220,6 @@ var RPCMessageCommandToString = map[MessageCommand]string{
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
|
||||
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse",
|
||||
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification",
|
||||
CmdBanRequestMessage: "BanRequest",
|
||||
CmdBanResponseMessage: "BanResponse",
|
||||
CmdUnbanRequestMessage: "UnbanRequest",
|
||||
CmdUnbanResponseMessage: "UnbanResponse",
|
||||
CmdGetInfoRequestMessage: "GetInfoRequest",
|
||||
CmdGetInfoResponseMessage: "GeInfoResponse",
|
||||
CmdNotifyPruningPointUTXOSetOverrideRequestMessage: "NotifyPruningPointUTXOSetOverrideRequest",
|
||||
CmdNotifyPruningPointUTXOSetOverrideResponseMessage: "NotifyPruningPointUTXOSetOverrideResponse",
|
||||
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
|
||||
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
|
||||
}
|
||||
|
||||
// Message is an interface that describes a kaspa message. A type that
|
||||
|
||||
@@ -11,11 +11,15 @@ import (
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/kaspanet/kaspad/util/random"
|
||||
)
|
||||
|
||||
// TestBlockHeader tests the MsgBlockHeader API.
|
||||
func TestBlockHeader(t *testing.T) {
|
||||
nonce := uint64(0xba4d87a69924a93d)
|
||||
nonce, err := random.Uint64()
|
||||
if err != nil {
|
||||
t.Errorf("random.Uint64: Error generating nonce: %v", err)
|
||||
}
|
||||
|
||||
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
|
||||
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// MsgIBDBlockLocatorHighestHashNotFound represents a kaspa BlockLocatorHighestHashNotFound message
|
||||
type MsgIBDBlockLocatorHighestHashNotFound struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *MsgIBDBlockLocatorHighestHashNotFound) Command() MessageCommand {
|
||||
return CmdIBDBlockLocatorHighestHashNotFound
|
||||
}
|
||||
|
||||
// NewMsgIBDBlockLocatorHighestHashNotFound returns a new IBDBlockLocatorHighestHashNotFound message
|
||||
func NewMsgIBDBlockLocatorHighestHashNotFound() *MsgIBDBlockLocatorHighestHashNotFound {
|
||||
return &MsgIBDBlockLocatorHighestHashNotFound{}
|
||||
}
|
||||
@@ -6,12 +6,17 @@ package appmessage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/random"
|
||||
)
|
||||
|
||||
// TestPing tests the MsgPing API against the latest protocol version.
|
||||
func TestPing(t *testing.T) {
|
||||
// Ensure we get the same nonce back out.
|
||||
nonce := uint64(0x61c2c5535902862)
|
||||
nonce, err := random.Uint64()
|
||||
if err != nil {
|
||||
t.Errorf("random.Uint64: Error generating nonce: %v", err)
|
||||
}
|
||||
msg := NewMsgPing(nonce)
|
||||
if msg.Nonce != nonce {
|
||||
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
|
||||
|
||||
@@ -6,11 +6,16 @@ package appmessage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/random"
|
||||
)
|
||||
|
||||
// TestPongLatest tests the MsgPong API against the latest protocol version.
|
||||
func TestPongLatest(t *testing.T) {
|
||||
nonce := uint64(0x1a05b581a5182c)
|
||||
nonce, err := random.Uint64()
|
||||
if err != nil {
|
||||
t.Errorf("random.Uint64: error generating nonce: %v", err)
|
||||
}
|
||||
msg := NewMsgPong(nonce)
|
||||
if msg.Nonce != nonce {
|
||||
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// BanRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type BanRequestMessage struct {
|
||||
baseMessage
|
||||
|
||||
IP string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *BanRequestMessage) Command() MessageCommand {
|
||||
return CmdBanRequestMessage
|
||||
}
|
||||
|
||||
// NewBanRequestMessage returns an instance of the message
|
||||
func NewBanRequestMessage(ip string) *BanRequestMessage {
|
||||
return &BanRequestMessage{
|
||||
IP: ip,
|
||||
}
|
||||
}
|
||||
|
||||
// BanResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type BanResponseMessage struct {
|
||||
baseMessage
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *BanResponseMessage) Command() MessageCommand {
|
||||
return CmdBanResponseMessage
|
||||
}
|
||||
|
||||
// NewBanResponseMessage returns a instance of the message
|
||||
func NewBanResponseMessage() *BanResponseMessage {
|
||||
return &BanResponseMessage{}
|
||||
}
|
||||
@@ -55,7 +55,6 @@ type BlockVerboseData struct {
|
||||
Bits string
|
||||
Difficulty float64
|
||||
ParentHashes []string
|
||||
ChildrenHashes []string
|
||||
SelectedParentHash string
|
||||
BlueScore uint64
|
||||
IsHeaderOnly bool
|
||||
@@ -105,5 +104,4 @@ type ScriptPubKeyResult struct {
|
||||
Hex string
|
||||
Type string
|
||||
Address string
|
||||
Version uint16
|
||||
}
|
||||
|
||||
@@ -27,7 +27,6 @@ type GetBlockDAGInfoResponseMessage struct {
|
||||
VirtualParentHashes []string
|
||||
Difficulty float64
|
||||
PastMedianTime int64
|
||||
PruningPointHash string
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
@@ -4,9 +4,9 @@ package appmessage
|
||||
// its respective RPC message
|
||||
type GetBlocksRequestMessage struct {
|
||||
baseMessage
|
||||
LowHash string
|
||||
IncludeBlockVerboseData bool
|
||||
IncludeTransactionVerboseData bool
|
||||
LowHash string
|
||||
IncludeBlockHexes bool
|
||||
IncludeBlockVerboseData bool
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -15,12 +15,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetBlocksRequestMessage returns a instance of the message
|
||||
func NewGetBlocksRequestMessage(lowHash string, includeBlockVerboseData bool,
|
||||
includeTransactionVerboseData bool) *GetBlocksRequestMessage {
|
||||
func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeBlockVerboseData bool) *GetBlocksRequestMessage {
|
||||
return &GetBlocksRequestMessage{
|
||||
LowHash: lowHash,
|
||||
IncludeBlockVerboseData: includeBlockVerboseData,
|
||||
IncludeTransactionVerboseData: includeTransactionVerboseData,
|
||||
LowHash: lowHash,
|
||||
IncludeBlockHexes: includeBlockHexes,
|
||||
IncludeBlockVerboseData: includeBlockVerboseData,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,6 +28,7 @@ func NewGetBlocksRequestMessage(lowHash string, includeBlockVerboseData bool,
|
||||
type GetBlocksResponseMessage struct {
|
||||
baseMessage
|
||||
BlockHashes []string
|
||||
BlockHexes []string
|
||||
BlockVerboseData []*BlockVerboseData
|
||||
|
||||
Error *RPCError
|
||||
@@ -45,6 +45,7 @@ func NewGetBlocksResponseMessage(blockHashes []string, blockHexes []string,
|
||||
|
||||
return &GetBlocksResponseMessage{
|
||||
BlockHashes: blockHashes,
|
||||
BlockHexes: blockHexes,
|
||||
BlockVerboseData: blockVerboseData,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// GetInfoRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type GetInfoRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *GetInfoRequestMessage) Command() MessageCommand {
|
||||
return CmdGetInfoRequestMessage
|
||||
}
|
||||
|
||||
// NewGeInfoRequestMessage returns a instance of the message
|
||||
func NewGeInfoRequestMessage() *GetInfoRequestMessage {
|
||||
return &GetInfoRequestMessage{}
|
||||
}
|
||||
|
||||
// GetInfoResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type GetInfoResponseMessage struct {
|
||||
baseMessage
|
||||
P2PID string
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *GetInfoResponseMessage) Command() MessageCommand {
|
||||
return CmdGetInfoResponseMessage
|
||||
}
|
||||
|
||||
// NewGetInfoResponseMessage returns a instance of the message
|
||||
func NewGetInfoResponseMessage(p2pID string) *GetInfoResponseMessage {
|
||||
return &GetInfoResponseMessage{
|
||||
P2PID: p2pID,
|
||||
}
|
||||
}
|
||||
@@ -37,8 +37,7 @@ func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage {
|
||||
// its respective RPC message
|
||||
type BlockAddedNotificationMessage struct {
|
||||
baseMessage
|
||||
Block *MsgBlock
|
||||
BlockVerboseData *BlockVerboseData
|
||||
Block *MsgBlock
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -47,9 +46,8 @@ func (msg *BlockAddedNotificationMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewBlockAddedNotificationMessage returns a instance of the message
|
||||
func NewBlockAddedNotificationMessage(block *MsgBlock, blockVerboseData *BlockVerboseData) *BlockAddedNotificationMessage {
|
||||
func NewBlockAddedNotificationMessage(block *MsgBlock) *BlockAddedNotificationMessage {
|
||||
return &BlockAddedNotificationMessage{
|
||||
Block: block,
|
||||
BlockVerboseData: blockVerboseData,
|
||||
Block: block,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// NotifyPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type NotifyPruningPointUTXOSetOverrideRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *NotifyPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
|
||||
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
|
||||
}
|
||||
|
||||
// NewNotifyPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
|
||||
func NewNotifyPruningPointUTXOSetOverrideRequestMessage() *NotifyPruningPointUTXOSetOverrideRequestMessage {
|
||||
return &NotifyPruningPointUTXOSetOverrideRequestMessage{}
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type NotifyPruningPointUTXOSetOverrideResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *NotifyPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
|
||||
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
|
||||
}
|
||||
|
||||
// NewNotifyPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
|
||||
func NewNotifyPruningPointUTXOSetOverrideResponseMessage() *NotifyPruningPointUTXOSetOverrideResponseMessage {
|
||||
return &NotifyPruningPointUTXOSetOverrideResponseMessage{}
|
||||
}
|
||||
|
||||
// PruningPointUTXOSetOverrideNotificationMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type PruningPointUTXOSetOverrideNotificationMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *PruningPointUTXOSetOverrideNotificationMessage) Command() MessageCommand {
|
||||
return CmdPruningPointUTXOSetOverrideNotificationMessage
|
||||
}
|
||||
|
||||
// NewPruningPointUTXOSetOverrideNotificationMessage returns a instance of the message
|
||||
func NewPruningPointUTXOSetOverrideNotificationMessage() *PruningPointUTXOSetOverrideNotificationMessage {
|
||||
return &PruningPointUTXOSetOverrideNotificationMessage{}
|
||||
}
|
||||
|
||||
// StopNotifyingPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
|
||||
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
|
||||
}
|
||||
|
||||
// NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage returns a instance of the message
|
||||
func NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage {
|
||||
return &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{}
|
||||
}
|
||||
|
||||
// StopNotifyingPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
|
||||
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
|
||||
}
|
||||
|
||||
// NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage returns a instance of the message
|
||||
func NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage {
|
||||
return &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// StopNotifyingUTXOsChangedRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type StopNotifyingUTXOsChangedRequestMessage struct {
|
||||
baseMessage
|
||||
Addresses []string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *StopNotifyingUTXOsChangedRequestMessage) Command() MessageCommand {
|
||||
return CmdStopNotifyingUTXOsChangedRequestMessage
|
||||
}
|
||||
|
||||
// NewStopNotifyingUTXOsChangedRequestMessage returns a instance of the message
|
||||
func NewStopNotifyingUTXOsChangedRequestMessage(addresses []string) *StopNotifyingUTXOsChangedRequestMessage {
|
||||
return &StopNotifyingUTXOsChangedRequestMessage{
|
||||
Addresses: addresses,
|
||||
}
|
||||
}
|
||||
|
||||
// StopNotifyingUTXOsChangedResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type StopNotifyingUTXOsChangedResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *StopNotifyingUTXOsChangedResponseMessage) Command() MessageCommand {
|
||||
return CmdStopNotifyingUTXOsChangedResponseMessage
|
||||
}
|
||||
|
||||
// NewStopNotifyingUTXOsChangedResponseMessage returns a instance of the message
|
||||
func NewStopNotifyingUTXOsChangedResponseMessage() *StopNotifyingUTXOsChangedResponseMessage {
|
||||
return &StopNotifyingUTXOsChangedResponseMessage{}
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
package appmessage
|
||||
|
||||
// UnbanRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type UnbanRequestMessage struct {
|
||||
baseMessage
|
||||
|
||||
IP string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *UnbanRequestMessage) Command() MessageCommand {
|
||||
return CmdUnbanRequestMessage
|
||||
}
|
||||
|
||||
// NewUnbanRequestMessage returns an instance of the message
|
||||
func NewUnbanRequestMessage(ip string) *UnbanRequestMessage {
|
||||
return &UnbanRequestMessage{
|
||||
IP: ip,
|
||||
}
|
||||
}
|
||||
|
||||
// UnbanResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type UnbanResponseMessage struct {
|
||||
baseMessage
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *UnbanResponseMessage) Command() MessageCommand {
|
||||
return CmdUnbanResponseMessage
|
||||
}
|
||||
|
||||
// NewUnbanResponseMessage returns a instance of the message
|
||||
func NewUnbanResponseMessage() *UnbanResponseMessage {
|
||||
return &UnbanResponseMessage{}
|
||||
}
|
||||
@@ -90,18 +90,14 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg), db)
|
||||
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var utxoIndex *utxoindex.UTXOIndex
|
||||
if cfg.UTXOIndex {
|
||||
utxoIndex, err = utxoindex.New(domain.Consensus(), db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
utxoIndex = utxoindex.New(domain.Consensus(), db)
|
||||
log.Infof("UTXO index started")
|
||||
}
|
||||
|
||||
@@ -148,7 +144,6 @@ func setupRPC(
|
||||
shutDownChan,
|
||||
)
|
||||
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
|
||||
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
|
||||
|
||||
return rpcManager
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@ package app
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("KASD")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.KASD)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
app/protocol/blocklogger/blocklogger.go (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Copyright (c) 2015-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blocklogger
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
)
|
||||
|
||||
var (
|
||||
receivedLogBlocks int64
|
||||
receivedLogTx int64
|
||||
lastBlockLogTime = mstime.Now()
|
||||
mtx sync.Mutex
|
||||
)
|
||||
|
||||
// LogBlock logs a new block blue score as an information message
|
||||
// to show progress to the user. In order to prevent spam, it limits logging to
|
||||
// one message every 10 seconds with duration and totals included.
|
||||
func LogBlock(block *externalapi.DomainBlock) {
|
||||
mtx.Lock()
|
||||
defer mtx.Unlock()
|
||||
|
||||
receivedLogBlocks++
|
||||
receivedLogTx += int64(len(block.Transactions))
|
||||
|
||||
now := mstime.Now()
|
||||
duration := now.Sub(lastBlockLogTime)
|
||||
if duration < time.Second*10 {
|
||||
return
|
||||
}
|
||||
|
||||
// Truncate the duration to 10s of milliseconds.
|
||||
tDuration := duration.Round(10 * time.Millisecond)
|
||||
|
||||
// Log information about new block blue score.
|
||||
blockStr := "blocks"
|
||||
if receivedLogBlocks == 1 {
|
||||
blockStr = "block"
|
||||
}
|
||||
txStr := "transactions"
|
||||
if receivedLogTx == 1 {
|
||||
txStr = "transaction"
|
||||
}
|
||||
|
||||
log.Infof("Processed %d %s in the last %s (%d %s, %s)",
|
||||
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
|
||||
txStr, mstime.UnixMilliseconds(block.Header.TimeInMilliseconds()))
|
||||
|
||||
receivedLogBlocks = 0
|
||||
receivedLogTx = 0
|
||||
lastBlockLogTime = now
|
||||
}
|
||||
@@ -8,4 +8,4 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("BDAG")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
@@ -1,8 +1,8 @@
|
||||
package flowcontext
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
@@ -38,6 +38,8 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
|
||||
}
|
||||
|
||||
for i, newBlock := range newBlocks {
|
||||
blocklogger.LogBlock(block)
|
||||
|
||||
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
|
||||
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
||||
if err != nil {
|
||||
@@ -57,15 +59,6 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
|
||||
// resets due to pruning point change via IBD.
|
||||
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
|
||||
if f.onPruningPointUTXOSetOverrideHandler != nil {
|
||||
return f.onPruningPointUTXOSetOverrideHandler()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
||||
block *externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
|
||||
|
||||
@@ -108,10 +101,6 @@ func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks
|
||||
|
||||
// AddBlock adds the given block to the DAG and propagates it.
|
||||
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
|
||||
if len(block.Transactions) == 0 {
|
||||
return protocolerrors.Errorf(false, "cannot add header only block")
|
||||
}
|
||||
|
||||
blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||
|
||||
@@ -18,11 +18,11 @@ import (
|
||||
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
|
||||
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
|
||||
if !isErrRouteClosed {
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
||||
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
log.Errorf("error from %s: %s", flowName, err)
|
||||
log.Errorf("error from %s: %+v", flowName, err)
|
||||
}
|
||||
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
|
||||
@@ -23,10 +23,6 @@ import (
|
||||
// when a block is added to the DAG
|
||||
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
|
||||
|
||||
// OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
|
||||
// resets due to pruning point change via IBD.
|
||||
type OnPruningPointUTXOSetOverrideHandler func() error
|
||||
|
||||
// OnTransactionAddedToMempoolHandler is a handler function that's triggered
|
||||
// when a transaction is added to the mempool
|
||||
type OnTransactionAddedToMempoolHandler func()
|
||||
@@ -42,9 +38,8 @@ type FlowContext struct {
|
||||
|
||||
timeStarted int64
|
||||
|
||||
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
|
||||
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
|
||||
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
||||
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
|
||||
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
|
||||
|
||||
transactionsToRebroadcastLock sync.Mutex
|
||||
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
|
||||
@@ -87,11 +82,6 @@ func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlo
|
||||
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
|
||||
}
|
||||
|
||||
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
|
||||
func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
|
||||
f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
|
||||
}
|
||||
|
||||
// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
|
||||
func (f *FlowContext) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler) {
|
||||
f.onTransactionAddedToMempoolHandler = onTransactionAddedToMempoolHandler
|
||||
|
||||
@@ -2,6 +2,8 @@ package flowcontext
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -194,9 +194,6 @@ func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externa
|
||||
|
||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
roots = append(roots, current)
|
||||
} else {
|
||||
log.Debugf("Block %s was skipped when checking for orphan roots: "+
|
||||
"exists: %t, status: %s", current, blockInfo.Exists, blockInfo.BlockStatus)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -35,5 +35,6 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
|
||||
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
|
||||
}
|
||||
|
||||
return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
|
||||
context.AddressManager().AddAddresses(msgAddresses.AddressList...)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -70,14 +70,8 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
|
||||
}
|
||||
|
||||
if !foundHighestHashInTheSelectedParentChainOfTargetHash {
|
||||
log.Warnf("no hash was found in the blockLocator "+
|
||||
return protocolerrors.Errorf(true, "no hash was found in the blockLocator "+
|
||||
"that was in the selected parent chain of targetHash %s", targetHash)
|
||||
|
||||
ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
|
||||
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
AddOrphan(orphanBlock *externalapi.DomainBlock)
|
||||
@@ -105,19 +104,9 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
err = flow.banIfBlockIsHeaderOnly(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Processing block %s", inv.Hash)
|
||||
missingParents, blockInsertionResult, err := flow.processBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||
continue
|
||||
}
|
||||
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Infof("Ignoring duplicate block %s", inv.Hash)
|
||||
continue
|
||||
@@ -146,15 +135,6 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||
if len(block.Transactions) == 0 {
|
||||
return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
|
||||
consensushashing.BlockHash(block))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
||||
if len(flow.invsQueue) > 0 {
|
||||
var inv *appmessage.MsgInvRelayBlock
|
||||
|
||||
@@ -17,7 +17,7 @@ type RequestIBDBlocksContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestHeadersFlow struct {
|
||||
type handleRequestBlocksFlow struct {
|
||||
RequestIBDBlocksContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peer.Peer
|
||||
@@ -27,7 +27,7 @@ type handleRequestHeadersFlow struct {
|
||||
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
|
||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||
|
||||
flow := &handleRequestHeadersFlow{
|
||||
flow := &handleRequestBlocksFlow{
|
||||
RequestIBDBlocksContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
@@ -36,7 +36,7 @@ func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestHeadersFlow) start() error {
|
||||
func (flow *handleRequestBlocksFlow) start() error {
|
||||
for {
|
||||
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
|
||||
if err != nil {
|
||||
|
||||
@@ -28,14 +28,10 @@ func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.Domain
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
|
||||
log.Debugf("Syncing headers up to %s", highHash)
|
||||
headersSynced, err := flow.syncHeaders(highHash)
|
||||
err := flow.syncHeaders(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !headersSynced {
|
||||
log.Debugf("Aborting IBD because the headers failed to sync")
|
||||
return nil
|
||||
}
|
||||
log.Debugf("Finished syncing headers up to %s", highHash)
|
||||
|
||||
log.Debugf("Syncing the current pruning point UTXO set")
|
||||
@@ -59,61 +55,47 @@ func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.Domain
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncHeaders attempts to sync headers from the peer. This method may fail
|
||||
// because the peer and us have conflicting pruning points. In that case we
|
||||
// return (false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) (bool, error) {
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !highestSharedBlockFound {
|
||||
return false, nil
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) error {
|
||||
highHashReceived := false
|
||||
for !highHashReceived {
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the highHash has not been received, the peer is misbehaving
|
||||
highHashBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
// We're finished once highHash has been inserted into the DAG
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
highHashReceived = blockInfo.Exists
|
||||
log.Debugf("Headers downloaded from peer %s. Are further headers required: %t", flow.peer, !highHashReceived)
|
||||
}
|
||||
if !highHashBlockInfo.Exists {
|
||||
return false, protocolerrors.Errorf(true, "did not receive "+
|
||||
"highHash header %s from peer %s during header download", highHash, flow.peer)
|
||||
}
|
||||
log.Debugf("Headers downloaded from peer %s", flow.peer)
|
||||
return true, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
|
||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(targetHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
highestHash, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
return nil, err
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
@@ -122,7 +104,7 @@ func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
return highestHash, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
@@ -132,7 +114,7 @@ func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -177,35 +159,27 @@ func (flow *handleRelayInvsFlow) findHighestHashIndex(
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
||||
// blockLocator. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleRelayInvsFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
||||
highestHash := message.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, true, nil
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
||||
return nil, false, nil
|
||||
default:
|
||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
}
|
||||
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
@@ -221,6 +195,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
|
||||
// headers
|
||||
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
|
||||
errChan := make(chan error)
|
||||
doneChan := make(chan interface{})
|
||||
spawn("handleRelayInvsFlow-downloadHeaders", func() {
|
||||
for {
|
||||
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
|
||||
@@ -229,7 +204,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
|
||||
return
|
||||
}
|
||||
if doneIBD {
|
||||
close(blockHeadersMessageChan)
|
||||
doneChan <- struct{}{}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -245,10 +220,7 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
|
||||
|
||||
for {
|
||||
select {
|
||||
case blockHeadersMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
case blockHeadersMessage := <-blockHeadersMessageChan:
|
||||
for _, header := range blockHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(header)
|
||||
if err != nil {
|
||||
@@ -257,6 +229,8 @@ func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externa
|
||||
}
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-doneChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -410,11 +384,6 @@ func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externala
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
|
||||
}
|
||||
|
||||
err = flow.OnPruningPointUTXOSetOverride()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -499,13 +468,6 @@ func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.Do
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(hashes) == 0 {
|
||||
// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
|
||||
// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
|
||||
// In these cases - GetMissingBlockBodyHashes would return an empty array.
|
||||
log.Debugf("No missing block body hashes found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
@@ -538,11 +500,6 @@ func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.Do
|
||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
||||
}
|
||||
|
||||
err = flow.banIfBlockIsHeaderOnly(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
|
||||
@@ -5,5 +5,5 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
|
||||
type SendVirtualSelectedParentInvContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
|
||||
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
|
||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||
|
||||
virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)
|
||||
|
||||
virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
|
||||
return outgoingRoute.Enqueue(virtualSelectedParentInv)
|
||||
}
|
||||
@@ -89,10 +89,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
|
||||
}
|
||||
|
||||
if peerAddress != nil {
|
||||
err := context.AddressManager().AddAddresses(peerAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
context.AddressManager().AddAddresses(peerAddress)
|
||||
}
|
||||
return peer, nil
|
||||
}
|
||||
@@ -107,7 +104,7 @@ func handleError(err error, flowName string, isStopping *uint32, errChan chan er
|
||||
return
|
||||
}
|
||||
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
log.Errorf("Handshake protocol error from %s: %s", flowName, err)
|
||||
if atomic.AddUint32(isStopping, 1) == 1 {
|
||||
errChan <- err
|
||||
|
||||
@@ -5,5 +5,5 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -60,7 +60,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
||||
}
|
||||
|
||||
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
|
||||
return nil, protocolerrors.New(false, "connected to self")
|
||||
return nil, protocolerrors.New(true, "connected to self")
|
||||
}
|
||||
|
||||
// Disconnect and ban peers from a different network
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
|
||||
pErr := protocolerrors.ProtocolError{}
|
||||
pErr := &protocolerrors.ProtocolError{}
|
||||
if errors.As(err, &pErr) != isProtocolError {
|
||||
t.Fatalf("Unexepcted error %+v", err)
|
||||
}
|
||||
|
||||
@@ -2,10 +2,6 @@ package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
@@ -22,21 +18,10 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var headerOnlyBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
&externalapi.DomainHash{},
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
||||
var orphanBlock = &externalapi.DomainBlock{
|
||||
Header: blockheader.NewImmutableBlockHeader(
|
||||
constants.MaxBlockVersion,
|
||||
@@ -48,7 +33,6 @@ var orphanBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var validPruningPointBlock = &externalapi.DomainBlock{
|
||||
@@ -62,7 +46,6 @@ var validPruningPointBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var invalidPruningPointBlock = &externalapi.DomainBlock{
|
||||
@@ -76,7 +59,6 @@ var invalidPruningPointBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var unexpectedIBDBlock = &externalapi.DomainBlock{
|
||||
@@ -90,7 +72,6 @@ var unexpectedIBDBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var invalidBlock = &externalapi.DomainBlock{
|
||||
@@ -104,7 +85,6 @@ var invalidBlock = &externalapi.DomainBlock{
|
||||
0,
|
||||
0,
|
||||
),
|
||||
Transactions: []*externalapi.DomainTransaction{{}},
|
||||
}
|
||||
|
||||
var unknownBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
|
||||
@@ -113,7 +93,6 @@ var validPruningPointHash = consensushashing.BlockHash(validPruningPointBlock)
|
||||
var invalidBlockHash = consensushashing.BlockHash(invalidBlock)
|
||||
var invalidPruningPointHash = consensushashing.BlockHash(invalidPruningPointBlock)
|
||||
var orphanBlockHash = consensushashing.BlockHash(orphanBlock)
|
||||
var headerOnlyBlockHash = consensushashing.BlockHash(headerOnlyBlock)
|
||||
|
||||
type fakeRelayInvsContext struct {
|
||||
testName string
|
||||
@@ -126,23 +105,6 @@ type fakeRelayInvsContext struct {
|
||||
validateAndInsertImportedPruningPointResponse error
|
||||
getBlockInfoResponse *externalapi.BlockInfo
|
||||
validateAndInsertBlockResponse error
|
||||
rwLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) OnPruningPointUTXOSetOverride() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetVirtualUTXOs(expectedVirtualParents []*externalapi.DomainHash, fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {
|
||||
@@ -166,8 +128,6 @@ func (f *fakeRelayInvsContext) GetBlockHeader(blockHash *externalapi.DomainHash)
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
if f.getBlockInfoResponse != nil {
|
||||
return f.getBlockInfoResponse, nil
|
||||
}
|
||||
@@ -207,8 +167,6 @@ func (f *fakeRelayInvsContext) AppendImportedPruningPointUTXOs(outpointAndUTXOEn
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.validateAndInsertImportedPruningPointResponse
|
||||
}
|
||||
|
||||
@@ -221,16 +179,12 @@ func (f *fakeRelayInvsContext) CreateBlockLocator(lowHash, highHash *externalapi
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return externalapi.BlockLocator{
|
||||
f.params.GenesisHash,
|
||||
}, nil
|
||||
@@ -249,8 +203,6 @@ func (f *fakeRelayInvsContext) GetVirtualInfo() (*externalapi.VirtualInfo, error
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.isValidPruningPointResponse, nil
|
||||
}
|
||||
|
||||
@@ -279,8 +231,6 @@ func (f *fakeRelayInvsContext) Domain() domain.Domain {
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) Config() *config.Config {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return &config.Config{
|
||||
Flags: &config.Flags{
|
||||
NetworkFlags: config.NetworkFlags{
|
||||
@@ -319,59 +269,13 @@ func (f *fakeRelayInvsContext) IsIBDRunning() bool {
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
return f.trySetIBDRunningResponse
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) UnsetIBDRunning() {
|
||||
f.rwLock.RLock()
|
||||
defer f.rwLock.RUnlock()
|
||||
close(f.finishedIBD)
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetValidateAndInsertBlockResponse(err error) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.validateAndInsertBlockResponse = err
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetValidateAndInsertImportedPruningPointResponse(err error) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.validateAndInsertImportedPruningPointResponse = err
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetGetBlockInfoResponse(info externalapi.BlockInfo) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.getBlockInfoResponse = &info
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetTrySetIBDRunningResponse(b bool) {
|
||||
f.rwLock.Lock()
|
||||
defer f.rwLock.Unlock()
|
||||
f.trySetIBDRunningResponse = b
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) SetIsValidPruningPointResponse(b bool) {
f.rwLock.Lock()
defer f.rwLock.Unlock()
f.isValidPruningPointResponse = b
}

func (f *fakeRelayInvsContext) GetGenesisHeader() externalapi.BlockHeader {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.params.GenesisBlock.Header
}

func (f *fakeRelayInvsContext) GetFinishedIBDChan() chan struct{} {
f.rwLock.RLock()
defer f.rwLock.RUnlock()
return f.finishedIBD
}
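The fake context in this hunk exposes its canned responses only through rwLock-guarded setters and getters, because the flow under test reads them from a separate goroutine while the test goroutine rewires them. A minimal, self-contained sketch of that pattern follows; the fakeConsensus type and its fields are illustrative stand-ins, not the actual test fake.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// fakeConsensus is a cut-down stand-in for the test fake in this hunk: the
// flow under test calls ValidateAndInsertBlock from its own goroutine while
// the test goroutine changes the canned response, so every access to the
// shared field goes through the same RWMutex.
type fakeConsensus struct {
	rwLock                         sync.RWMutex
	validateAndInsertBlockResponse error
}

// SetValidateAndInsertBlockResponse is what the test goroutine calls.
func (f *fakeConsensus) SetValidateAndInsertBlockResponse(err error) {
	f.rwLock.Lock()
	defer f.rwLock.Unlock()
	f.validateAndInsertBlockResponse = err
}

// ValidateAndInsertBlock is what the flow goroutine calls.
func (f *fakeConsensus) ValidateAndInsertBlock() error {
	f.rwLock.RLock()
	defer f.rwLock.RUnlock()
	return f.validateAndInsertBlockResponse
}

func main() {
	fake := &fakeConsensus{}

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Flow goroutine: reads the canned response.
		fmt.Println("first call:", fake.ValidateAndInsertBlock())
	}()
	wg.Wait()

	// Test goroutine: swaps the canned response without racing the reader.
	fake.SetValidateAndInsertBlockResponse(errors.New("bad merkle root"))
	fmt.Println("second call:", fake.ValidateAndInsertBlock())
}
```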
func TestHandleRelayInvs(t *testing.T) {
|
||||
triggerIBD := func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(consensushashing.BlockHash(orphanBlock)))
|
||||
@@ -385,7 +289,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
|
||||
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
defer func() {
|
||||
context.validateAndInsertBlockResponse = nil
|
||||
}()
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
@@ -435,10 +342,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
name: "sending a known invalid inv",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusInvalid,
|
||||
})
|
||||
}
|
||||
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(knownInvalidBlockHash))
|
||||
if err != nil {
|
||||
@@ -481,29 +388,6 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "got unrequested block",
|
||||
},
|
||||
{
|
||||
name: "sending header only block on relay",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
err := incomingRoute.Enqueue(appmessage.NewMsgInvBlock(headerOnlyBlockHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
|
||||
msg, err := outgoingRoute.DequeueWithTimeout(time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("DequeueWithTimeout: %+v", err)
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(headerOnlyBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
}
|
||||
},
|
||||
expectsProtocolError: true,
|
||||
expectsBan: true,
|
||||
expectsErrToContain: "block where expected block with body",
|
||||
},
|
||||
{
|
||||
name: "sending invalid block",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
@@ -518,7 +402,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(invalidBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -542,7 +426,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestRelayBlocks)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes()))
|
||||
context.validateAndInsertBlockResponse = ruleerrors.NewErrMissingParents(orphanBlock.Header.ParentHashes())
|
||||
err = incomingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(orphanBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -568,7 +452,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
{
|
||||
name: "starting IBD when peer is already in IBD",
|
||||
funcToExecute: func(t *testing.T, incomingRoute, outgoingRoute *router.Route, context *fakeRelayInvsContext) {
|
||||
context.SetTrySetIBDRunningResponse(false)
|
||||
context.trySetIBDRunningResponse = false
|
||||
triggerIBD(t, incomingRoute, outgoingRoute, context)
|
||||
|
||||
checkNoActivity(t, outgoingRoute)
|
||||
@@ -674,15 +558,15 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -697,10 +581,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -714,7 +598,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -746,11 +630,11 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrDuplicateBlock)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrDuplicateBlock
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.GetGenesisHeader())},
|
||||
appmessage.DomainBlockHeaderToBlockHeader(context.params.GenesisBlock.Header)},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -765,10 +649,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
// Finish the IBD by sending DoneHeaders and send incompatible pruning point
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
@@ -782,7 +666,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -814,7 +698,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestHeaders)
|
||||
|
||||
context.SetValidateAndInsertBlockResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(
|
||||
appmessage.NewBlockHeadersMessage(
|
||||
[]*appmessage.MsgBlockHeader{
|
||||
@@ -854,10 +738,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -906,10 +790,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -922,7 +806,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointHashMessage)
|
||||
|
||||
context.SetIsValidPruningPointResponse(false)
|
||||
context.isValidPruningPointResponse = false
|
||||
err = incomingRoute.Enqueue(appmessage.NewPruningPointHashMessage(invalidPruningPointHash))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -956,10 +840,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1021,10 +905,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1084,10 +968,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1153,10 +1037,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1180,7 +1064,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidPruningPointBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1220,10 +1104,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1247,7 +1131,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
|
||||
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrSuggestedPruningViolatesFinality)
|
||||
context.validateAndInsertImportedPruningPointResponse = ruleerrors.ErrSuggestedPruningViolatesFinality
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(validPruningPointBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1284,10 +1168,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1363,10 +1247,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1440,10 +1324,10 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
|
||||
// This is done so it'll think it added the high hash to the DAG and proceed with fetching
|
||||
// the pruning point UTXO set.
|
||||
context.SetGetBlockInfoResponse(externalapi.BlockInfo{
|
||||
context.getBlockInfoResponse = &externalapi.BlockInfo{
|
||||
Exists: true,
|
||||
BlockStatus: externalapi.StatusHeaderOnly,
|
||||
})
|
||||
}
|
||||
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
@@ -1483,7 +1367,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
}
|
||||
_ = msg.(*appmessage.MsgRequestIBDBlocks)
|
||||
|
||||
context.SetValidateAndInsertImportedPruningPointResponse(ruleerrors.ErrBadMerkleRoot)
|
||||
context.validateAndInsertBlockResponse = ruleerrors.ErrBadMerkleRoot
|
||||
err = incomingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(invalidBlock)))
|
||||
if err != nil {
|
||||
t.Fatalf("Enqueue: %+v", err)
|
||||
@@ -1527,17 +1411,17 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
select {
|
||||
case err := <-errChan:
|
||||
checkFlowError(t, err, test.expectsProtocolError, test.expectsBan, test.expectsErrToContain)
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("waiting for error timed out after %s", 10*time.Second)
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("waiting for error timed out after %s", time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-context.GetFinishedIBDChan():
|
||||
case <-context.finishedIBD:
|
||||
if !test.expectsIBDToFinish {
|
||||
t.Fatalf("IBD unexpecetedly finished")
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
case <-time.After(time.Second):
|
||||
if test.expectsIBDToFinish {
|
||||
t.Fatalf("IBD didn't finished after %d", time.Second)
|
||||
}
|
||||
@@ -1552,7 +1436,7 @@ func TestHandleRelayInvs(t *testing.T) {
|
||||
if !errors.Is(err, router.ErrRouteClosed) {
|
||||
t.Fatalf("unexpected error %+v", err)
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("waiting for flow to finish timed out after %s", time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
package testing
|
||||
|
||||
// Because of a bug in Go coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333
|
||||
// So this is a dummy non-test go file in the package.
|
||||
@@ -191,7 +191,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
|
||||
continue
|
||||
}
|
||||
|
||||
return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
|
||||
return protocolerrors.Errorf(true, "rejected transaction %s", txID)
|
||||
}
|
||||
err = flow.broadcastAcceptedTransactions([]*externalapi.DomainTransactionID{txID})
|
||||
if err != nil {
|
||||
|
||||
@@ -5,5 +5,5 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -69,11 +69,6 @@ func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowconte
|
||||
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
|
||||
}
|
||||
|
||||
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
||||
func (m *Manager) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler flowcontext.OnPruningPointUTXOSetOverrideHandler) {
|
||||
m.context.SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler)
|
||||
}
|
||||
|
||||
// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
|
||||
func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler flowcontext.OnTransactionAddedToMempoolHandler) {
|
||||
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
|
||||
|
||||
@@ -2,6 +2,8 @@ package peer
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("PROT")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.PROT)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/rejects"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
|
||||
@@ -59,20 +57,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net

peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
sendVersionRoute, router.OutgoingRoute())

if err != nil {
// non-blocking read from channel
select {
case innerError := <-errChan:
if errors.Is(err, routerpkg.ErrRouteClosed) {
m.handleError(innerError, netConnection, router.OutgoingRoute())
} else {
log.Errorf("Peer %s sent invalid message: %s", netConnection, innerError)
m.handleError(err, netConnection, router.OutgoingRoute())
}
default:
m.handleError(err, netConnection, router.OutgoingRoute())
}
m.handleError(err, netConnection, router.OutgoingRoute())
return
}
defer m.context.RemoveFromPeers(peer)
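The `select` with a `default` branch in this hunk is Go's idiom for a non-blocking channel read: take an already-queued flow error if there is one, otherwise fall through and use the handshake error as-is. A small, self-contained sketch of the same shape, with illustrative function and message names:

```go
package main

import (
	"errors"
	"fmt"
)

// drainOne does a non-blocking read from errChan, mirroring the
// select/default shape in the hunk above: if a flow already pushed an error,
// report that richer error; otherwise fall back to the handshake error we
// were called with.
func drainOne(errChan chan error, handshakeErr error) error {
	select {
	case innerError := <-errChan:
		return fmt.Errorf("flow error: %w (handshake also failed: %v)", innerError, handshakeErr)
	default:
		// Nothing queued: the handshake error stands on its own.
		return handshakeErr
	}
}

func main() {
	errChan := make(chan error, 1)

	// Case 1: channel empty, the default branch runs immediately.
	fmt.Println(drainOne(errChan, errors.New("route closed")))

	// Case 2: a flow error was queued first, so it wins.
	errChan <- errors.New("peer sent invalid message")
	fmt.Println(drainOne(errChan, errors.New("route closed")))
}
```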
@@ -88,12 +74,12 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
}
|
||||
|
||||
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
|
||||
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
|
||||
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
|
||||
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
|
||||
|
||||
err := m.context.ConnectionManager().Ban(netConnection)
|
||||
if !errors.Is(err, connmanager.ErrCannotBanPermanent) {
|
||||
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -102,7 +88,7 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
log.Infof("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause)
|
||||
log.Debugf("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause)
|
||||
netConnection.Disconnect()
|
||||
return
|
||||
}
|
||||
@@ -149,16 +135,11 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*flow{
|
||||
m.registerOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
|
||||
isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.SendVirtualSelectedParentInv(m.context, outgoingRoute, peer)
|
||||
}),
|
||||
|
||||
m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
|
||||
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
|
||||
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
|
||||
appmessage.CmdBlockHeaders, appmessage.CmdPruningPointHash, appmessage.CmdIBDBlockLocatorHighestHash,
|
||||
appmessage.CmdIBDBlockLocatorHighestHashNotFound, appmessage.CmdDonePruningPointUTXOSetChunks},
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks},
|
||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRelayInvs(m.context, incomingRoute,
|
||||
outgoingRoute, peer)
|
||||
@@ -238,7 +219,7 @@ func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopp
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*flow{
|
||||
m.registerFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
|
||||
m.registerFlow("HandleRelayedTransactions", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
|
||||
@@ -274,24 +255,6 @@ func (m *Manager) registerFlow(name string, router *routerpkg.Router, messageTyp
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
|
||||
}
|
||||
|
||||
func (m *Manager) registerFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
|
||||
messageTypes []appmessage.MessageCommand, isStopping *uint32,
|
||||
errChan chan error, initializeFunc flowInitializeFunc) *flow {
|
||||
|
||||
route, err := router.AddIncomingRouteWithCapacity(capacity, messageTypes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
|
||||
}
|
||||
|
||||
func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32,
|
||||
errChan chan error, initializeFunc flowInitializeFunc) *flow {
|
||||
|
||||
return &flow{
|
||||
name: name,
|
||||
executeFunc: func(peer *peerpkg.Peer) {
|
||||
|
||||
@@ -12,19 +12,19 @@ type ProtocolError struct {
Cause error
}

func (e ProtocolError) Error() string {
func (e *ProtocolError) Error() string {
return e.Cause.Error()
}

// Unwrap returns the cause of ProtocolError, to be used with `errors.Unwrap()`
func (e ProtocolError) Unwrap() error {
func (e *ProtocolError) Unwrap() error {
return e.Cause
}

// Errorf formats according to a format specifier and returns the string
// as a ProtocolError.
func Errorf(shouldBan bool, format string, args ...interface{}) error {
return ProtocolError{
return &ProtocolError{
ShouldBan: shouldBan,
Cause: errors.Errorf(format, args...),
}
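With Error and Unwrap moved to pointer receivers and the constructors returning &ProtocolError{}, only *ProtocolError satisfies the error interface, which is why the errors.As call sites elsewhere in this compare switch their target to a pointer. The standalone example below reproduces that interaction with the standard library errors package and an illustrative protoErr type, not the real protocolerrors API:

```go
package main

import (
	"errors"
	"fmt"
)

// protoErr mimics the shape of ProtocolError after this change: Error and
// Unwrap are defined on the pointer receiver, and constructors return a
// pointer rather than a value.
type protoErr struct {
	ShouldBan bool
	Cause     error
}

func (e *protoErr) Error() string { return e.Cause.Error() }
func (e *protoErr) Unwrap() error { return e.Cause }

func newProtoErr(shouldBan bool, msg string) error {
	return &protoErr{ShouldBan: shouldBan, Cause: errors.New(msg)}
}

func main() {
	err := fmt.Errorf("handshake failed: %w", newProtoErr(true, "connected to self"))

	// Only *protoErr implements error now, so the errors.As target must be a
	// *protoErr variable passed by address. This is the same reason the call
	// sites in this compare switch from
	//   protocolErr := (protocolerrors.ProtocolError{})
	// to
	//   protocolErr := &(protocolerrors.ProtocolError{})
	target := &protoErr{}
	if errors.As(err, &target) {
		fmt.Println("protocol error, shouldBan =", target.ShouldBan)
	}
}
```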
@@ -33,7 +33,7 @@ func Errorf(shouldBan bool, format string, args ...interface{}) error {
|
||||
// New returns a ProtocolError with the supplied message.
|
||||
// New also records the stack trace at the point it was called.
|
||||
func New(shouldBan bool, message string) error {
|
||||
return ProtocolError{
|
||||
return &ProtocolError{
|
||||
ShouldBan: shouldBan,
|
||||
Cause: errors.New(message),
|
||||
}
|
||||
@@ -41,7 +41,7 @@ func New(shouldBan bool, message string) error {
|
||||
|
||||
// Wrap wraps the given error and returns it as a ProtocolError.
|
||||
func Wrap(shouldBan bool, err error, message string) error {
|
||||
return ProtocolError{
|
||||
return &ProtocolError{
|
||||
ShouldBan: shouldBan,
|
||||
Cause: errors.Wrap(err, message),
|
||||
}
|
||||
@@ -49,7 +49,7 @@ func Wrap(shouldBan bool, err error, message string) error {
|
||||
|
||||
// Wrapf wraps the given error with the given format and returns it as a ProtocolError.
|
||||
func Wrapf(shouldBan bool, err error, format string, args ...interface{}) error {
|
||||
return ProtocolError{
|
||||
return &ProtocolError{
|
||||
ShouldBan: shouldBan,
|
||||
Cause: errors.Wrapf(err, format, args...),
|
||||
}
|
||||
|
||||
@@ -5,5 +5,5 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("RPCS")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -69,31 +69,10 @@ func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, blockIns
|
||||
return err
|
||||
}
|
||||
|
||||
msgBlock := appmessage.DomainBlockToMsgBlock(block)
|
||||
blockVerboseData, err := m.context.BuildBlockVerboseData(block.Header, block, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(msgBlock, blockVerboseData)
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(appmessage.DomainBlockToMsgBlock(block))
|
||||
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
||||
// resets due to pruning point change via IBD.
|
||||
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyPruningPointUTXOSetOverride")
|
||||
defer onEnd()
|
||||
|
||||
if m.context.Config.UTXOIndex {
|
||||
err := m.notifyPruningPointUTXOSetOverride()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyFinalityConflict notifies the manager that there's a finality conflict in the DAG
|
||||
func (m *Manager) NotifyFinalityConflict(violatingBlockHash string) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyFinalityConflict")
|
||||
@@ -116,25 +95,13 @@ func (m *Manager) notifyUTXOsChanged(blockInsertionResult *externalapi.BlockInse
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyUTXOsChanged")
|
||||
defer onEnd()
|
||||
|
||||
utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult)
|
||||
utxoIndexChanges, err := m.context.UTXOIndex.Update(blockInsertionResult.VirtualSelectedParentChainChanges)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
|
||||
}
|
||||
|
||||
func (m *Manager) notifyPruningPointUTXOSetOverride() error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyPruningPointUTXOSetOverride")
|
||||
defer onEnd()
|
||||
|
||||
err := m.context.UTXOIndex.Reset()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
|
||||
}
|
||||
|
||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
|
||||
defer onEnd()
|
||||
|
||||
@@ -35,15 +35,9 @@ var handlers = map[appmessage.MessageCommand]handler{
|
||||
appmessage.CmdShutDownRequestMessage: rpchandlers.HandleShutDown,
|
||||
appmessage.CmdGetHeadersRequestMessage: rpchandlers.HandleGetHeaders,
|
||||
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
|
||||
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
|
||||
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
|
||||
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
|
||||
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
|
||||
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
|
||||
appmessage.CmdUnbanRequestMessage: rpchandlers.HandleUnban,
|
||||
appmessage.CmdGetInfoRequestMessage: rpchandlers.HandleGetInfo,
|
||||
appmessage.CmdNotifyPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleNotifyPruningPointUTXOSetOverrideRequest,
|
||||
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
|
||||
}
|
||||
|
||||
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
||||
|
||||
@@ -2,6 +2,8 @@ package rpccontext
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log = logger.RegisterSubSystem("RPCS")
|
||||
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -30,9 +30,8 @@ type NotificationListener struct {
|
||||
propagateFinalityConflictResolvedNotifications bool
|
||||
propagateUTXOsChangedNotifications bool
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
||||
propagatePruningPointUTXOSetOverrideNotifications bool
|
||||
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
propagateUTXOsChangedNotificationAddresses []*UTXOsChangedNotificationAddress
|
||||
}
|
||||
|
||||
// NewNotificationManager creates a new NotificationManager
|
||||
@@ -181,23 +180,6 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the notification manager that the UTXO index
|
||||
// reset due to pruning point change via IBD.
|
||||
func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagatePruningPointUTXOSetOverrideNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(appmessage.NewPruningPointUTXOSetOverrideNotificationMessage())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNotificationListener() *NotificationListener {
|
||||
return &NotificationListener{
|
||||
propagateBlockAddedNotifications: false,
|
||||
@@ -206,7 +188,6 @@ func newNotificationListener() *NotificationListener {
|
||||
propagateFinalityConflictResolvedNotifications: false,
|
||||
propagateUTXOsChangedNotifications: false,
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
||||
propagatePruningPointUTXOSetOverrideNotifications: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,70 +216,34 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
|
||||
}
|
||||
|
||||
// PropagateUTXOsChangedNotifications instructs the listener to send UTXOs changed notifications
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
// to the remote listener
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses =
make(map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress, len(addresses))
}

for _, address := range addresses {
nl.propagateUTXOsChangedNotificationAddresses[address.ScriptPublicKeyString] = address
}
}

// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
if !nl.propagateUTXOsChangedNotifications {
return
}

for _, address := range addresses {
delete(nl.propagateUTXOsChangedNotificationAddresses, address.ScriptPublicKeyString)
}
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses = addresses
}
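One side of this hunk keeps the subscribed addresses in a map keyed by utxoindex.ScriptPublicKeyString, so repeated subscriptions merge and unsubscribing is a delete; the other side overwrites a plain slice. A minimal, self-contained sketch of the map-based variant, with illustrative address and listener types:

```go
package main

import "fmt"

// address is a stand-in for UTXOsChangedNotificationAddress: the address
// string plus the script-public-key string used as the map key.
type address struct {
	Address               string
	ScriptPublicKeyString string
}

// listener keeps the subscribed addresses in a map keyed by script public
// key, so repeated Propagate calls merge and Stop calls can delete in O(1),
// matching the map-based variant in the hunk above.
type listener struct {
	propagate bool
	addresses map[string]*address
}

func (l *listener) PropagateUTXOsChangedNotifications(addresses []*address) {
	if !l.propagate {
		l.propagate = true
		l.addresses = make(map[string]*address, len(addresses))
	}
	for _, addr := range addresses {
		// Duplicates simply overwrite the same key, so they are ignored.
		l.addresses[addr.ScriptPublicKeyString] = addr
	}
}

func (l *listener) StopPropagatingUTXOsChangedNotifications(addresses []*address) {
	if !l.propagate {
		return
	}
	for _, addr := range addresses {
		delete(l.addresses, addr.ScriptPublicKeyString)
	}
}

func main() {
	l := &listener{}
	a := &address{Address: "kaspa:example-a", ScriptPublicKeyString: "spk-a"}
	b := &address{Address: "kaspa:example-b", ScriptPublicKeyString: "spk-b"}

	l.PropagateUTXOsChangedNotifications([]*address{a})
	l.PropagateUTXOsChangedNotifications([]*address{b, a}) // merges; duplicate a is ignored
	l.StopPropagatingUTXOsChangedNotifications([]*address{a})

	fmt.Println("still subscribed:", len(l.addresses)) // 1 (only b)
}
```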
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {

// As an optimization, we iterate over the smaller set (O(n)) among the two below
// and check existence over the larger set (O(1))
utxoChangesSize := len(utxoChanges.Added) + len(utxoChanges.Removed)
addressesSize := len(nl.propagateUTXOsChangedNotificationAddresses)

notification := &appmessage.UTXOsChangedNotificationMessage{}
if utxoChangesSize < addressesSize {
|
||||
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
|
||||
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
||||
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
||||
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
||||
notification.Added = append(notification.Added,
|
||||
ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)...)
|
||||
}
|
||||
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
|
||||
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
||||
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
||||
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
for outpoint := range removedOutpoints {
|
||||
notification.Removed = append(notification.Removed, &appmessage.UTXOsByAddressesEntry{
|
||||
Address: listenerAddress.Address,
|
||||
Outpoint: &appmessage.RPCOutpoint{
|
||||
TransactionID: outpoint.TransactionID.String(),
|
||||
Index: outpoint.Index,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return notification
|
||||
}
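The comment in this function spells out the optimization: iterate whichever collection is smaller and do O(1) lookups in the larger one, instead of always walking the subscribed addresses. A generic, self-contained sketch of that "iterate the smaller set, probe the larger" pattern, with illustrative names:

```go
package main

import "fmt"

// intersectKeys returns the keys present in both maps. To keep the work at
// O(min(len(a), len(b))), it ranges over the smaller map and does O(1)
// lookups in the larger one — the same trick the notification code above
// uses when deciding whether to walk the UTXO changes or the subscribed
// addresses.
func intersectKeys(a, b map[string]struct{}) []string {
	small, large := a, b
	if len(b) < len(a) {
		small, large = b, a
	}
	var result []string
	for key := range small {
		if _, ok := large[key]; ok {
			result = append(result, key)
		}
	}
	return result
}

func main() {
	changedScripts := map[string]struct{}{"spk-a": {}, "spk-b": {}}
	subscribed := map[string]struct{}{"spk-b": {}, "spk-c": {}, "spk-d": {}}

	fmt.Println(intersectKeys(changedScripts, subscribed)) // [spk-b]
}
```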
@@ -307,15 +252,3 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||
func (nl *NotificationListener) PropagateVirtualSelectedParentBlueScoreChangedNotifications() {
|
||||
nl.propagateVirtualSelectedParentBlueScoreChangedNotifications = true
|
||||
}
|
||||
|
||||
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
||||
// to the remote listener.
|
||||
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
||||
nl.propagatePruningPointUTXOSetOverrideNotifications = true
|
||||
}
|
||||
|
||||
// StopPropagatingPruningPointUTXOSetOverrideNotifications instructs the listener to stop sending pruning
|
||||
// point UTXO set override notifications to the remote listener.
|
||||
func (nl *NotificationListener) StopPropagatingPruningPointUTXOSetOverrideNotifications() {
|
||||
nl.propagatePruningPointUTXOSetOverrideNotifications = false
|
||||
}
|
||||
|
||||
@@ -2,9 +2,6 @@ package rpccontext
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/utxoindex"
|
||||
@@ -31,43 +28,3 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
|
||||
}
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
|
||||
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
|
||||
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
|
||||
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
|
||||
for outpoint := range outpoints {
|
||||
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
|
||||
Address: address,
|
||||
Outpoint: &appmessage.RPCOutpoint{
|
||||
TransactionID: outpoint.TransactionID.String(),
|
||||
Index: outpoint.Index,
|
||||
},
|
||||
})
|
||||
}
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
|
||||
// to UTXOsChangedNotificationAddresses
|
||||
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||
addressStrings []string) ([]*UTXOsChangedNotificationAddress, error) {
|
||||
|
||||
addresses := make([]*UTXOsChangedNotificationAddress, len(addressStrings))
|
||||
for i, addressString := range addressStrings {
|
||||
address, err := util.DecodeAddress(addressString, ctx.Config.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Could not decode address '%s': %s", addressString, err)
|
||||
}
|
||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
}
|
||||
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
|
||||
addresses[i] = &UTXOsChangedNotificationAddress{
|
||||
Address: addressString,
|
||||
ScriptPublicKeyString: scriptPublicKeyString,
|
||||
}
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
@@ -3,15 +3,12 @@ package rpccontext
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/estimatedsize"
|
||||
@@ -23,36 +20,17 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/pointers"
|
||||
)
|
||||
|
||||
// ErrBuildBlockVerboseDataInvalidBlock indicates that a block that was given to BuildBlockVerboseData is invalid.
|
||||
var ErrBuildBlockVerboseDataInvalidBlock = errors.New("ErrBuildBlockVerboseDataInvalidBlock")
|
||||
|
||||
// BuildBlockVerboseData builds a BlockVerboseData from the given blockHeader.
|
||||
// A block may optionally also be given if it's available in the calling context.
|
||||
func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, block *externalapi.DomainBlock,
|
||||
includeTransactionVerboseData bool) (*appmessage.BlockVerboseData, error) {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildBlockVerboseData")
|
||||
defer onEnd()
|
||||
|
||||
// BuildBlockVerboseData builds a BlockVerboseData from the given block.
|
||||
func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, includeTransactionVerboseData bool) (*appmessage.BlockVerboseData, error) {
|
||||
hash := consensushashing.HeaderHash(blockHeader)
|
||||
|
||||
blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusInvalid {
|
||||
return nil, errors.Wrap(ErrBuildBlockVerboseDataInvalidBlock, "cannot build verbose data for "+
|
||||
"invalid block")
|
||||
}
|
||||
|
||||
childrenHashes, err := ctx.Domain.Consensus().GetBlockChildren(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &appmessage.BlockVerboseData{
|
||||
Hash: hash.String(),
|
||||
Version: blockHeader.Version(),
|
||||
@@ -61,7 +39,6 @@ func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, b
|
||||
AcceptedIDMerkleRoot: blockHeader.AcceptedIDMerkleRoot().String(),
|
||||
UTXOCommitment: blockHeader.UTXOCommitment().String(),
|
||||
ParentHashes: hashes.ToStrings(blockHeader.ParentHashes()),
|
||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||
Nonce: blockHeader.Nonce(),
|
||||
Time: blockHeader.TimeInMilliseconds(),
|
||||
Bits: strconv.FormatInt(int64(blockHeader.Bits()), 16),
|
||||
@@ -71,11 +48,9 @@ func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, b
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
if block == nil {
|
||||
block, err = ctx.Domain.Consensus().GetBlock(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block, err := ctx.Domain.Consensus().GetBlock(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txIDs := make([]string, len(block.Transactions))
|
||||
@@ -125,9 +100,6 @@ func (ctx *Context) BuildTransactionVerboseData(tx *externalapi.DomainTransactio
|
||||
blockHeader externalapi.BlockHeader, blockHash string) (
|
||||
*appmessage.TransactionVerboseData, error) {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildTransactionVerboseData")
|
||||
defer onEnd()
|
||||
|
||||
var payloadHash string
|
||||
if tx.SubnetworkID != subnetworks.SubnetworkIDNative {
|
||||
payloadHash = tx.PayloadHash.String()
|
||||
@@ -195,7 +167,7 @@ func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransac
|
||||
passesFilter := len(filterAddrMap) == 0
|
||||
var encodedAddr string
|
||||
if addr != nil {
|
||||
encodedAddr = addr.EncodeAddress()
|
||||
encodedAddr = *pointers.String(addr.EncodeAddress())
|
||||
|
||||
// If the filter doesn't already pass, make it pass if
|
||||
// the address exists in the filter.
|
||||
@@ -212,7 +184,6 @@ func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransac
|
||||
output.Index = uint32(i)
|
||||
output.Value = transactionOutput.Value
|
||||
output.ScriptPubKey = &appmessage.ScriptPubKeyResult{
|
||||
Version: transactionOutput.ScriptPublicKey.Version,
|
||||
Address: encodedAddr,
|
||||
Hex: hex.EncodeToString(transactionOutput.ScriptPublicKey.Script),
|
||||
Type: scriptClass.String(),
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"net"
|
||||
)
|
||||
|
||||
// HandleBan handles the respectively named RPC command
|
||||
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
banRequest := request.(*appmessage.BanRequestMessage)
|
||||
ip := net.ParseIP(banRequest.IP)
|
||||
if ip == nil {
|
||||
errorMessage := &appmessage.BanResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", banRequest.IP)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
err := context.ConnectionManager.BanByIP(ip)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.BanResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not ban IP: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
response := appmessage.NewBanResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HandleGetBlock handles the respectively named RPC command
|
||||
@@ -29,16 +28,10 @@ func HandleGetBlock(context *rpccontext.Context, _ *router.Router, request appme
|
||||
|
||||
response := appmessage.NewGetBlockResponseMessage()
|
||||
|
||||
blockVerboseData, err := context.BuildBlockVerboseData(header, nil, getBlockRequest.IncludeTransactionVerboseData)
|
||||
blockVerboseData, err := context.BuildBlockVerboseData(header, getBlockRequest.IncludeTransactionVerboseData)
|
||||
if err != nil {
|
||||
if errors.Is(err, rpccontext.ErrBuildBlockVerboseDataInvalidBlock) {
|
||||
errorMessage := &appmessage.GetBlockResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Block %s is invalid", hash)
|
||||
return errorMessage, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response.BlockVerboseData = blockVerboseData
|
||||
|
||||
return response, nil
|
||||
|
||||
@@ -36,11 +36,5 @@ func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appm
|
||||
response.Difficulty = context.GetDifficultyRatio(virtualInfo.Bits, context.Config.ActiveNetParams)
|
||||
response.PastMedianTime = virtualInfo.PastMedianTime
|
||||
|
||||
pruningPoint, err := context.Domain.Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.PruningPointHash = pruningPoint.String()
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -3,104 +3,18 @@ package rpchandlers
import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

const (
	// maxBlocksInGetBlocksResponse is the max amount of blocks that are
	// allowed in a GetBlocksResult.
	maxBlocksInGetBlocksResponse = 1000
	maxBlocksInGetBlocksResponse = 100
)

// HandleGetBlocks handles the respectively named RPC command
func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	getBlocksRequest := request.(*appmessage.GetBlocksRequestMessage)

	// Validate that user didn't set IncludeTransactionVerboseData without setting IncludeBlockVerboseData
	if !getBlocksRequest.IncludeBlockVerboseData && getBlocksRequest.IncludeTransactionVerboseData {
		return &appmessage.GetBlocksResponseMessage{
			Error: appmessage.RPCErrorf(
				"If includeTransactionVerboseData is set, then includeBlockVerboseData must be set as well"),
		}, nil
	}

	// Decode lowHash
	// If lowHash is empty - use genesis instead.
	lowHash := context.Config.ActiveNetParams.GenesisHash
	if getBlocksRequest.LowHash != "" {
		var err error
		lowHash, err = externalapi.NewDomainHashFromString(getBlocksRequest.LowHash)
		if err != nil {
			return &appmessage.GetBlocksResponseMessage{
				Error: appmessage.RPCErrorf("Could not decode lowHash %s: %s", getBlocksRequest.LowHash, err),
			}, nil
		}

		blockInfo, err := context.Domain.Consensus().GetBlockInfo(lowHash)
		if err != nil {
			return nil, err
		}

		if !blockInfo.Exists {
			return &appmessage.GetBlocksResponseMessage{
				Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
			}, nil
		}
	}

	// Get hashes between lowHash and virtualSelectedParent
	virtualSelectedParent, err := context.Domain.Consensus().GetVirtualSelectedParent()
	if err != nil {
		return nil, err
	}
	blockHashes, err := context.Domain.Consensus().GetHashesBetween(
		lowHash, virtualSelectedParent, maxBlocksInGetBlocksResponse)
	if err != nil {
		return nil, err
	}

	// prepend low hash to make it inclusive
	blockHashes = append([]*externalapi.DomainHash{lowHash}, blockHashes...)

	// If there are no maxBlocksInGetBlocksResponse between lowHash and virtualSelectedParent -
	// add virtualSelectedParent's anticone
	if len(blockHashes) < maxBlocksInGetBlocksResponse {
		virtualSelectedParentAnticone, err := context.Domain.Consensus().Anticone(virtualSelectedParent)
		if err != nil {
			return nil, err
		}
		blockHashes = append(blockHashes, virtualSelectedParentAnticone...)
	}

	// Both GetHashesBetween and Anticone might return more then the allowed number of blocks, so
	// trim any extra blocks.
	if len(blockHashes) > maxBlocksInGetBlocksResponse {
		blockHashes = blockHashes[:maxBlocksInGetBlocksResponse]
	}

	// Prepare the response
	response := &appmessage.GetBlocksResponseMessage{
		BlockHashes: hashes.ToStrings(blockHashes),
	}

	// Retrieve all block data in case BlockVerboseData was requested
	if getBlocksRequest.IncludeBlockVerboseData {
		response.BlockVerboseData = make([]*appmessage.BlockVerboseData, len(blockHashes))
		for i, blockHash := range blockHashes {
			blockHeader, err := context.Domain.Consensus().GetBlockHeader(blockHash)
			if err != nil {
				return nil, err
			}
			blockVerboseData, err := context.BuildBlockVerboseData(blockHeader, nil,
				getBlocksRequest.IncludeTransactionVerboseData)
			if err != nil {
				return nil, err
			}

			response.BlockVerboseData[i] = blockVerboseData
		}
	}
	response := &appmessage.GetBlocksResponseMessage{}
	response.Error = appmessage.RPCErrorf("not implemented")
	return response, nil
}
@@ -1,144 +0,0 @@
|
||||
package rpchandlers_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpchandlers"
|
||||
"github.com/kaspanet/kaspad/domain/consensus"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
)
|
||||
|
||||
type fakeDomain struct {
|
||||
testapi.TestConsensus
|
||||
}
|
||||
|
||||
func (d fakeDomain) Consensus() externalapi.Consensus { return d }
|
||||
func (d fakeDomain) MiningManager() miningmanager.MiningManager { return nil }
|
||||
|
||||
func TestHandleGetBlocks(t *testing.T) {
|
||||
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
|
||||
factory := consensus.NewFactory()
|
||||
tc, teardown, err := factory.NewTestConsensus(params, false, "TestHandleGetBlocks")
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting up consensus: %+v", err)
|
||||
}
|
||||
defer teardown(false)
|
||||
|
||||
fakeContext := rpccontext.Context{
|
||||
Config: &config.Config{Flags: &config.Flags{NetworkFlags: config.NetworkFlags{ActiveNetParams: params}}},
|
||||
Domain: fakeDomain{tc},
|
||||
}
|
||||
|
||||
getBlocks := func(lowHash *externalapi.DomainHash) *appmessage.GetBlocksResponseMessage {
|
||||
request := appmessage.GetBlocksRequestMessage{}
|
||||
if lowHash != nil {
|
||||
request.LowHash = lowHash.String()
|
||||
}
|
||||
response, err := rpchandlers.HandleGetBlocks(&fakeContext, nil, &request)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected empty request to not fail, instead: '%v'", err)
|
||||
}
|
||||
return response.(*appmessage.GetBlocksResponseMessage)
|
||||
}
|
||||
|
||||
filterAntiPast := func(povBlock *externalapi.DomainHash, slice []*externalapi.DomainHash) []*externalapi.DomainHash {
|
||||
antipast := make([]*externalapi.DomainHash, 0, len(slice))
|
||||
|
||||
for _, blockHash := range slice {
|
||||
isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(blockHash, povBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed doing reachability check: '%v'", err)
|
||||
}
|
||||
if !isInPastOfPovBlock {
|
||||
antipast = append(antipast, blockHash)
|
||||
}
|
||||
}
|
||||
return antipast
|
||||
}
|
||||
|
||||
// Create a DAG with the following structure:
|
||||
// merging block
|
||||
// / | \
|
||||
// split1 split2 split3
|
||||
// \ | /
|
||||
// merging block
|
||||
// / | \
|
||||
// split1 split2 split3
|
||||
// \ | /
|
||||
// etc.
|
||||
expectedOrder := make([]*externalapi.DomainHash, 0, 40)
|
||||
mergingBlock := params.GenesisHash
|
||||
for i := 0; i < 10; i++ {
|
||||
splitBlocks := make([]*externalapi.DomainHash, 0, 3)
|
||||
for j := 0; j < 3; j++ {
|
||||
blockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{mergingBlock}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed adding block: %v", err)
|
||||
}
|
||||
splitBlocks = append(splitBlocks, blockHash)
|
||||
}
|
||||
sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(splitBlocks, tc, t)))
|
||||
restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1]
|
||||
expectedOrder = append(expectedOrder, selectedParent)
|
||||
expectedOrder = append(expectedOrder, restOfSplitBlocks...)
|
||||
|
||||
mergingBlock, _, err = tc.AddBlock(splitBlocks, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed adding block: %v", err)
|
||||
}
|
||||
expectedOrder = append(expectedOrder, mergingBlock)
|
||||
}
|
||||
|
||||
virtualSelectedParent, err := tc.GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed getting SelectedParent: %v", err)
|
||||
}
|
||||
if !virtualSelectedParent.Equal(expectedOrder[len(expectedOrder)-1]) {
|
||||
t.Fatalf("Expected %s to be selectedParent, instead found: %s", expectedOrder[len(expectedOrder)-1], virtualSelectedParent)
|
||||
}
|
||||
|
||||
requestSelectedParent := getBlocks(virtualSelectedParent)
|
||||
if !reflect.DeepEqual(requestSelectedParent.BlockHashes, hashes.ToStrings([]*externalapi.DomainHash{virtualSelectedParent})) {
|
||||
t.Fatalf("TestHandleGetBlocks expected:\n%v\nactual:\n%v", virtualSelectedParent, requestSelectedParent.BlockHashes)
|
||||
}
|
||||
|
||||
for i, blockHash := range expectedOrder {
|
||||
expectedBlocks := filterAntiPast(blockHash, expectedOrder)
|
||||
expectedBlocks = append([]*externalapi.DomainHash{blockHash}, expectedBlocks...)
|
||||
|
||||
actualBlocks := getBlocks(blockHash)
|
||||
if !reflect.DeepEqual(actualBlocks.BlockHashes, hashes.ToStrings(expectedBlocks)) {
|
||||
t.Fatalf("TestHandleGetBlocks %d \nexpected: \n%v\nactual:\n%v", i,
|
||||
hashes.ToStrings(expectedBlocks), actualBlocks.BlockHashes)
|
||||
}
|
||||
}
|
||||
|
||||
// Make explicitly sure that if lowHash==highHash we get a slice with a single hash.
|
||||
actualBlocks := getBlocks(virtualSelectedParent)
|
||||
if !reflect.DeepEqual(actualBlocks.BlockHashes, []string{virtualSelectedParent.String()}) {
|
||||
t.Fatalf("TestHandleGetBlocks expected blocks to contain just '%s', instead got: \n%v",
|
||||
virtualSelectedParent, actualBlocks.BlockHashes)
|
||||
}
|
||||
|
||||
expectedOrder = append([]*externalapi.DomainHash{params.GenesisHash}, expectedOrder...)
|
||||
actualOrder := getBlocks(nil)
|
||||
if !reflect.DeepEqual(actualOrder.BlockHashes, hashes.ToStrings(expectedOrder)) {
|
||||
t.Fatalf("TestHandleGetBlocks \nexpected: %v \nactual:\n%v", expectedOrder, actualOrder.BlockHashes)
|
||||
}
|
||||
|
||||
requestAllExplictly := getBlocks(params.GenesisHash)
|
||||
if !reflect.DeepEqual(requestAllExplictly.BlockHashes, hashes.ToStrings(expectedOrder)) {
|
||||
t.Fatalf("TestHandleGetBlocks \nexpected: \n%v\n. actual:\n%v", expectedOrder, requestAllExplictly.BlockHashes)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	response := appmessage.NewGetInfoResponseMessage(context.NetAdapter.ID().String())
	return response, nil
}
@@ -5,5 +5,5 @@ import (
	"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,19 +0,0 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleNotifyPruningPointUTXOSetOverrideRequest handles the respectively named RPC command
|
||||
func HandleNotifyPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
listener, err := context.NotificationManager.Listener(router)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.PropagatePruningPointUTXOSetOverrideNotifications()
|
||||
|
||||
response := appmessage.NewNotifyPruningPointUTXOSetOverrideResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
@@ -3,7 +3,10 @@ package rpchandlers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/domain/utxoindex"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// HandleNotifyUTXOsChanged handles the respectively named RPC command
|
||||
@@ -15,11 +18,26 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
|
||||
}
|
||||
|
||||
notifyUTXOsChangedRequest := request.(*appmessage.NotifyUTXOsChangedRequestMessage)
|
||||
addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(notifyUTXOsChangedRequest.Addresses)
|
||||
if err != nil {
|
||||
errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
|
||||
errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err)
|
||||
return errorMessage, nil
|
||||
|
||||
addresses := make([]*rpccontext.UTXOsChangedNotificationAddress, len(notifyUTXOsChangedRequest.Addresses))
|
||||
for i, addressString := range notifyUTXOsChangedRequest.Addresses {
|
||||
address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
|
||||
addresses[i] = &rpccontext.UTXOsChangedNotificationAddress{
|
||||
Address: addressString,
|
||||
ScriptPublicKeyString: scriptPublicKeyString,
|
||||
}
|
||||
}
|
||||
|
||||
listener, err := context.NotificationManager.Listener(router)
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleStopNotifyingPruningPointUTXOSetOverrideRequest handles the respectively named RPC command
|
||||
func HandleStopNotifyingPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
listener, err := context.NotificationManager.Listener(router)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.StopPropagatingPruningPointUTXOSetOverrideNotifications()
|
||||
|
||||
response := appmessage.NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleStopNotifyingUTXOsChanged handles the respectively named RPC command
|
||||
func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
if !context.Config.UTXOIndex {
|
||||
errorMessage := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
|
||||
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
stopNotifyingUTXOsChangedRequest := request.(*appmessage.StopNotifyingUTXOsChangedRequestMessage)
|
||||
addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(stopNotifyingUTXOsChangedRequest.Addresses)
|
||||
if err != nil {
|
||||
errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage()
|
||||
errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
listener, err := context.NotificationManager.Listener(router)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.StopPropagatingUTXOsChangedNotifications(addresses)
|
||||
|
||||
response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
@@ -26,11 +25,9 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap

	err := context.ProtocolManager.AddBlock(domainBlock)
	if err != nil {
		isProtocolOrRuleError := errors.As(err, &ruleerrors.RuleError{}) || errors.As(err, &protocolerrors.ProtocolError{})
		if !isProtocolOrRuleError {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return nil, err
		}

		return &appmessage.SubmitBlockResponseMessage{
			Error:        appmessage.RPCErrorf("Block rejected. Reason: %s", err),
			RejectReason: appmessage.RejectReasonBlockInvalid,
@@ -1,27 +0,0 @@
package rpchandlers

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/rpc/rpccontext"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"net"
)

// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
	unbanRequest := request.(*appmessage.UnbanRequestMessage)
	ip := net.ParseIP(unbanRequest.IP)
	if ip == nil {
		errorMessage := &appmessage.UnbanResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", unbanRequest.IP)
		return errorMessage, nil
	}
	err := context.AddressManager.Unban(appmessage.NewNetAddressIPPort(ip, 0, 0))
	if err != nil {
		errorMessage := &appmessage.UnbanResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Could not unban IP: %s", err)
		return errorMessage, nil
	}
	response := appmessage.NewUnbanResponseMessage()
	return response, nil
}
@@ -1,39 +0,0 @@

Kaspad v0.9.2 - 2021-03-31
===========================
* Increase the route capacity of InvTransaction messages. (#1603) (#1637)

Kaspad v0.9.1 - 2021-03-14
===========================
* Testnet network reset

Kaspad v0.9.0 - 2021-03-04
===========================

* Merge big subdags in pick virtual parents (#1574)
* Write in the reject message the tx rejection reason (#1573)
* Add nil checks for protowire (#1570)
* Increase getBlocks limit to 1000 (#1572)
* Return RPC error if getBlock's lowHash doesn't exist (#1569)
* Add default dns-seeder to testnet (#1568)
* Fix utxoindex deserialization (#1566)
* Add pruning point hash to GetBlockDagInfo response (#1565)
* Use EmitUnpopulated so that kaspactl prints all fields, even the default ones (#1561)
* Stop logging an error whenever an RPC/P2P connection is canceled (#1562)
* Cleanup the logger and make it asynchronous (#1524)
* Close all iterators (#1542)
* Add childrenHashes to GetBlock/s RPC commands (#1560)
* Add ScriptPublicKey.Version to RPC (#1559)
* Fix the target block rate to create less bursty mining (#1554)

Kaspad v0.8.10 - 2021-02-25
===========================

* Fix bug where invalid mempool transactions were not removed (#1551)
* Add RPC reconnection to the miner (#1552)
* Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
* Fix UTXO index (#1548)
* Prevent fast failing (#1545)
* Increase the sleep time in kaspaminer when the node is not synced (#1544)
* Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
* Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -13,7 +13,6 @@ var commandTypes = []reflect.Type{
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetConnectedPeerInfoRequest{}),
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetPeerAddressesRequest{}),
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetCurrentNetworkRequest{}),
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetInfoRequest{}),
|
||||
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetBlockRequest{}),
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetBlocksRequest{}),
|
||||
@@ -33,9 +32,6 @@ var commandTypes = []reflect.Type{
|
||||
reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),
|
||||
|
||||
reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
|
||||
|
||||
reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
|
||||
reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),
|
||||
}
|
||||
|
||||
type commandDescription struct {
|
||||
|
||||
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -2,11 +2,10 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
|
||||
@@ -68,7 +67,7 @@ func postCommand(cfg *configFlags, client *grpcclient.GRPCClient, responseChan c
|
||||
if err != nil {
|
||||
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
|
||||
}
|
||||
responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response)
|
||||
responseBytes, err := protojson.Marshal(response)
|
||||
if err != nil {
|
||||
printErrorAndExit(errors.Wrapf(err, "error parsing the response from the RPC server").Error())
|
||||
}
|
||||
@@ -93,7 +92,6 @@ func prettifyResponse(response string) string {
|
||||
|
||||
marshalOptions := &protojson.MarshalOptions{}
|
||||
marshalOptions.Indent = " "
|
||||
marshalOptions.EmitUnpopulated = true
|
||||
return marshalOptions.Format(kaspadMessage)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -5,95 +5,42 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
|
||||
"github.com/pkg/errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const minerTimeout = 10 * time.Second
|
||||
|
||||
type minerClient struct {
|
||||
isReconnecting uint32
|
||||
clientLock sync.RWMutex
|
||||
rpcClient *rpcclient.RPCClient
|
||||
*rpcclient.RPCClient
|
||||
|
||||
cfg *configFlags
|
||||
blockAddedNotificationChan chan struct{}
|
||||
}
|
||||
|
||||
func (mc *minerClient) safeRPCClient() *rpcclient.RPCClient {
|
||||
mc.clientLock.RLock()
|
||||
defer mc.clientLock.RUnlock()
|
||||
return mc.rpcClient
|
||||
}
|
||||
|
||||
func (mc *minerClient) reconnect() {
|
||||
swapped := atomic.CompareAndSwapUint32(&mc.isReconnecting, 0, 1)
|
||||
if !swapped {
|
||||
return
|
||||
}
|
||||
|
||||
defer atomic.StoreUint32(&mc.isReconnecting, 0)
|
||||
|
||||
mc.clientLock.Lock()
|
||||
defer mc.clientLock.Unlock()
|
||||
|
||||
retryDuration := time.Second
|
||||
const maxRetryDuration = time.Minute
|
||||
log.Infof("Reconnecting RPC connection")
|
||||
for {
|
||||
err := mc.connect()
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if retryDuration < time.Minute {
|
||||
retryDuration *= 2
|
||||
} else {
|
||||
retryDuration = maxRetryDuration
|
||||
}
|
||||
|
||||
log.Errorf("Got error '%s' while reconnecting. Trying again in %s", err, retryDuration)
|
||||
time.Sleep(retryDuration)
|
||||
}
|
||||
}
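
The reconnect loop above retries with a delay that doubles between attempts, capped at one minute. A minimal, self-contained sketch of the same capped-backoff pattern, assuming a hypothetical connect stand-in rather than the real kaspad RPC client:

package main

import (
	"errors"
	"fmt"
	"time"
)

// connect is a hypothetical stand-in for the RPC connection attempt; it fails
// twice and then succeeds so the example terminates.
var attempts int

func connect() error {
	attempts++
	if attempts < 3 {
		return errors.New("connection refused")
	}
	return nil
}

// retryWithBackoff mirrors the retry logic in minerClient.reconnect: wait 1s,
// 2s, 4s, ... between attempts, never more than a minute, until connect succeeds.
func retryWithBackoff() {
	retryDuration := time.Second
	const maxRetryDuration = time.Minute
	for {
		if err := connect(); err == nil {
			return
		}
		if retryDuration < maxRetryDuration {
			retryDuration *= 2
		} else {
			retryDuration = maxRetryDuration
		}
		fmt.Printf("retrying in %s\n", retryDuration)
		time.Sleep(retryDuration)
	}
}

func main() {
	retryWithBackoff()
	fmt.Println("connected")
}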
|
||||
|
||||
func (mc *minerClient) connect() error {
|
||||
rpcAddress, err := mc.cfg.NetParams().NormalizeRPCServerAddress(mc.cfg.RPCServer)
|
||||
func newMinerClient(cfg *configFlags) (*minerClient, error) {
|
||||
rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
mc.rpcClient, err = rpcclient.NewRPCClient(rpcAddress)
|
||||
rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
mc.rpcClient.SetTimeout(minerTimeout)
|
||||
mc.rpcClient.SetLogger(backendLog, logger.LevelTrace)
|
||||
rpcClient.SetTimeout(minerTimeout)
|
||||
rpcClient.SetLogger(backendLog, logger.LevelTrace)
|
||||
|
||||
err = mc.rpcClient.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
|
||||
minerClient := &minerClient{
|
||||
RPCClient: rpcClient,
|
||||
blockAddedNotificationChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
err = rpcClient.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
|
||||
select {
|
||||
case mc.blockAddedNotificationChan <- struct{}{}:
|
||||
case minerClient.blockAddedNotificationChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error requesting block-added notifications")
|
||||
}
|
||||
|
||||
log.Infof("Connected to %s", rpcAddress)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newMinerClient(cfg *configFlags) (*minerClient, error) {
|
||||
minerClient := &minerClient{
|
||||
cfg: cfg,
|
||||
blockAddedNotificationChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
err := minerClient.connect()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "error requesting block-added notifications")
|
||||
}
|
||||
|
||||
return minerClient, nil
|
||||
|
||||
@@ -17,9 +17,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLogFilename = "kaspaminer.log"
|
||||
defaultErrLogFilename = "kaspaminer_err.log"
|
||||
defaultTargetBlockRateRatio = 2.0
|
||||
defaultLogFilename = "kaspaminer.log"
|
||||
defaultErrLogFilename = "kaspaminer_err.log"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -31,13 +30,13 @@ var (
|
||||
)
|
||||
|
||||
type configFlags struct {
|
||||
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
|
||||
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
|
||||
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
|
||||
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
|
||||
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
|
||||
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
|
||||
TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 0 means no limit (The default one is 2 * target network block rate)"`
|
||||
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
|
||||
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
|
||||
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
|
||||
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
|
||||
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
|
||||
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
|
||||
TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
@@ -65,11 +64,6 @@ func parseConfig() (*configFlags, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.TargetBlocksPerSecond == nil {
|
||||
targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds()
|
||||
cfg.TargetBlocksPerSecond = &targetBlocksPerSecond
|
||||
}
|
||||
|
||||
if cfg.Profile != "" {
|
||||
profilePort, err := strconv.Atoi(cfg.Profile)
|
||||
if err != nil || profilePort < 1024 || profilePort > 65535 {
|
||||
@@ -77,10 +71,6 @@ func parseConfig() (*configFlags, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.MiningAddr == "" {
|
||||
return nil, errors.New("--miningaddr is required")
|
||||
}
|
||||
|
||||
initLog(defaultLogFile, defaultErrLogFile)
|
||||
|
||||
return cfg, nil
|
||||
|
||||
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -14,7 +14,6 @@ var (
|
||||
)
|
||||
|
||||
func initLog(logFile, errLogFile string) {
|
||||
log.SetLevel(logger.LevelDebug)
|
||||
err := backendLog.AddLogFile(logFile, logger.LevelTrace)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err)
|
||||
@@ -25,15 +24,4 @@ func initLog(logFile, errLogFile string) {
|
||||
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = backendLog.Run()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer backendLog.Close()
|
||||
|
||||
// Show version at startup.
|
||||
log.Infof("Version %s", version.Version())
|
||||
@@ -40,7 +39,7 @@ func main() {
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "error connecting to the RPC server"))
|
||||
}
|
||||
defer client.safeRPCClient().Disconnect()
|
||||
defer client.Disconnect()
|
||||
|
||||
miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
@@ -49,7 +48,7 @@ func main() {
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
spawn("mineLoop", func() {
|
||||
err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
|
||||
err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "error in mine loop"))
|
||||
}
|
||||
|
||||
@@ -2,13 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
nativeerrors "errors"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspaminer/templatemanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/pow"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
|
||||
@@ -30,19 +29,11 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond
|
||||
rand.Seed(time.Now().UnixNano()) // Seed the global concurrent-safe random source.
|
||||
|
||||
errChan := make(chan error)
|
||||
|
||||
templateStopChan := make(chan struct{})
|
||||
|
||||
doneChan := make(chan struct{})
|
||||
|
||||
// We don't want to send router.DefaultMaxMessages blocks at once because there's
|
||||
// a high chance we'll get disconnected from the node, so we make the channel
|
||||
// capacity router.DefaultMaxMessages/2 (we give some slack for getBlockTemplate
|
||||
// requests)
|
||||
foundBlockChan := make(chan *externalapi.DomainBlock, router.DefaultMaxMessages/2)
|
||||
|
||||
spawn("templatesLoop", func() {
|
||||
templatesLoop(client, miningAddr, errChan)
|
||||
})
|
||||
|
||||
spawn("blocksLoop", func() {
|
||||
spawn("mineLoop-internalLoop", func() {
|
||||
const windowSize = 10
|
||||
var expectedDurationForWindow time.Duration
|
||||
var windowExpectedEndTime time.Time
|
||||
@@ -53,37 +44,31 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond
|
||||
}
|
||||
blockInWindowIndex := 0
|
||||
|
||||
sleepTime := 0 * time.Second
|
||||
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
|
||||
|
||||
for {
|
||||
foundBlockChan <- mineNextBlock(mineWhenNotSynced)
|
||||
foundBlock := make(chan *externalapi.DomainBlock)
|
||||
mineNextBlock(client, miningAddr, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
|
||||
block := <-foundBlock
|
||||
templateStopChan <- struct{}{}
|
||||
err := handleFoundBlock(client, block)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
|
||||
if hasBlockRateTarget {
|
||||
blockInWindowIndex++
|
||||
if blockInWindowIndex == windowSize-1 {
|
||||
deviation := windowExpectedEndTime.Sub(time.Now())
|
||||
if deviation > 0 {
|
||||
sleepTime = deviation / windowSize
|
||||
log.Infof("Finished to mine %d blocks %s earlier than expected. Setting the miner "+
|
||||
"to sleep %s between blocks to compensate",
|
||||
windowSize, deviation, sleepTime)
|
||||
log.Infof("Finished to mine %d blocks %s earlier than expected. Sleeping %s to compensate",
|
||||
windowSize, deviation, deviation)
|
||||
time.Sleep(deviation)
|
||||
}
|
||||
blockInWindowIndex = 0
|
||||
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
|
||||
}
|
||||
time.Sleep(sleepTime)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
spawn("handleFoundBlock", func() {
|
||||
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
|
||||
block := <-foundBlockChan
|
||||
err := handleFoundBlock(client, block)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
doneChan <- struct{}{}
|
||||
})
|
||||
@@ -114,105 +99,104 @@ func logHashRate() {
|
||||
})
|
||||
}
|
||||
|
||||
func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan *externalapi.DomainBlock, mineWhenNotSynced bool,
|
||||
templateStopChan chan struct{}, errChan chan error) {
|
||||
|
||||
newTemplateChan := make(chan *appmessage.GetBlockTemplateResponseMessage)
|
||||
spawn("templatesLoop", func() {
|
||||
templatesLoop(client, miningAddr, newTemplateChan, errChan, templateStopChan)
|
||||
})
|
||||
spawn("solveLoop", func() {
|
||||
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced)
|
||||
})
|
||||
}
|
||||
|
||||
func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
log.Infof("Submitting block %s to %s", blockHash, client.safeRPCClient().Address())
|
||||
log.Infof("Found block %s with parents %s. Submitting to %s", blockHash, block.Header.ParentHashes(), client.Address())
|
||||
|
||||
rejectReason, err := client.safeRPCClient().SubmitBlock(block)
|
||||
rejectReason, err := client.SubmitBlock(block)
|
||||
if err != nil {
|
||||
if nativeerrors.Is(err, router.ErrTimeout) {
|
||||
log.Warnf("Got timeout while submitting block %s to %s: %s", blockHash, client.safeRPCClient().Address(), err)
|
||||
client.reconnect()
|
||||
return nil
|
||||
}
|
||||
if rejectReason == appmessage.RejectReasonIsInIBD {
|
||||
const waitTime = 1 * time.Second
|
||||
log.Warnf("Block %s was rejected because the node is in IBD. Waiting for %s", blockHash, waitTime)
|
||||
time.Sleep(waitTime)
|
||||
log.Warnf("Block %s was rejected because the node is in IBD", blockHash)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "Error submitting block %s to %s", blockHash, client.safeRPCClient().Address())
|
||||
return errors.Errorf("Error submitting block %s to %s: %s", blockHash, client.Address(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func mineNextBlock(mineWhenNotSynced bool) *externalapi.DomainBlock {
|
||||
nonce := rand.Uint64() // Use the global concurrent-safe random source.
|
||||
for {
|
||||
nonce++
|
||||
// For each nonce we try to build a block from the most up to date
|
||||
// block template.
|
||||
// In the rare case where the nonce space is exhausted for a specific
|
||||
// block, it'll keep looping the nonce until a new block template
|
||||
// is discovered.
|
||||
block := getBlockForMining(mineWhenNotSynced)
|
||||
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
|
||||
headerForMining := block.Header.ToMutable()
|
||||
headerForMining.SetNonce(nonce)
|
||||
atomic.AddUint64(&hashesTried, 1)
|
||||
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
|
||||
block.Header = headerForMining.ToImmutable()
|
||||
log.Infof("Found block %s with parents %s", consensushashing.BlockHash(block), block.Header.ParentHashes())
|
||||
return block
|
||||
func solveBlock(block *externalapi.DomainBlock, stopChan chan struct{}, foundBlock chan *externalapi.DomainBlock) {
|
||||
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
|
||||
headerForMining := block.Header.ToMutable()
|
||||
initialNonce := rand.Uint64() // Use the global concurrent-safe random source.
|
||||
for i := initialNonce; i != initialNonce-1; i++ {
|
||||
select {
|
||||
case <-stopChan:
|
||||
return
|
||||
default:
|
||||
headerForMining.SetNonce(i)
|
||||
atomic.AddUint64(&hashesTried, 1)
|
||||
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
|
||||
block.Header = headerForMining.ToImmutable()
|
||||
foundBlock <- block
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getBlockForMining(mineWhenNotSynced bool) *externalapi.DomainBlock {
|
||||
tryCount := 0
|
||||
func templatesLoop(client *minerClient, miningAddr util.Address,
|
||||
newTemplateChan chan *appmessage.GetBlockTemplateResponseMessage, errChan chan error, stopChan chan struct{}) {
|
||||
|
||||
const sleepTime = 500 * time.Millisecond
|
||||
const sleepTimeWhenNotSynced = 5 * time.Second
|
||||
|
||||
for {
|
||||
tryCount++
|
||||
|
||||
shouldLog := (tryCount-1)%10 == 0
|
||||
template, isSynced := templatemanager.Get()
|
||||
if template == nil {
|
||||
if shouldLog {
|
||||
log.Info("Waiting for the initial template")
|
||||
}
|
||||
time.Sleep(sleepTime)
|
||||
continue
|
||||
}
|
||||
if !isSynced && !mineWhenNotSynced {
|
||||
if shouldLog {
|
||||
log.Warnf("Kaspad is not synced. Skipping current block template")
|
||||
}
|
||||
time.Sleep(sleepTimeWhenNotSynced)
|
||||
continue
|
||||
}
|
||||
|
||||
return template
|
||||
}
|
||||
}
|
||||
|
||||
func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) {
|
||||
getBlockTemplate := func() {
|
||||
template, err := client.safeRPCClient().GetBlockTemplate(miningAddr.String())
|
||||
template, err := client.GetBlockTemplate(miningAddr.String())
|
||||
if nativeerrors.Is(err, router.ErrTimeout) {
|
||||
log.Warnf("Got timeout while requesting block template from %s: %s", client.safeRPCClient().Address(), err)
|
||||
client.reconnect()
|
||||
log.Infof("Got timeout while requesting block template from %s", client.Address())
|
||||
return
|
||||
} else if err != nil {
|
||||
errChan <- errors.Errorf("Error getting block template from %s: %s", client.Address(), err)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
errChan <- errors.Wrapf(err, "Error getting block template from %s", client.safeRPCClient().Address())
|
||||
return
|
||||
}
|
||||
templatemanager.Set(template)
|
||||
newTemplateChan <- template
|
||||
}
|
||||
|
||||
getBlockTemplate()
|
||||
const tickerTime = 500 * time.Millisecond
|
||||
ticker := time.NewTicker(tickerTime)
|
||||
for {
|
||||
select {
|
||||
case <-stopChan:
|
||||
close(newTemplateChan)
|
||||
return
|
||||
case <-client.blockAddedNotificationChan:
|
||||
getBlockTemplate()
|
||||
ticker.Reset(tickerTime)
|
||||
case <-ticker.C:
|
||||
case <-time.Tick(500 * time.Millisecond):
|
||||
getBlockTemplate()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func solveLoop(newTemplateChan chan *appmessage.GetBlockTemplateResponseMessage, foundBlock chan *externalapi.DomainBlock,
|
||||
mineWhenNotSynced bool) {
|
||||
|
||||
var stopOldTemplateSolving chan struct{}
|
||||
for template := range newTemplateChan {
|
||||
if !template.IsSynced && !mineWhenNotSynced {
|
||||
log.Warnf("Kaspad is not synced. Skipping current block template")
|
||||
continue
|
||||
}
|
||||
|
||||
if stopOldTemplateSolving != nil {
|
||||
close(stopOldTemplateSolving)
|
||||
}
|
||||
|
||||
stopOldTemplateSolving = make(chan struct{})
|
||||
block := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
|
||||
|
||||
stopOldTemplateSolvingCopy := stopOldTemplateSolving
|
||||
spawn("solveBlock", func() {
|
||||
solveBlock(block, stopOldTemplateSolvingCopy, foundBlock)
|
||||
})
|
||||
}
|
||||
if stopOldTemplateSolving != nil {
|
||||
close(stopOldTemplateSolving)
|
||||
}
|
||||
}
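
solveLoop above cancels the solver working on a stale template by closing its stop channel; closing a channel releases every receiver at once, which is why a single close(stopOldTemplateSolving) is enough. A small illustrative sketch of that stop-channel pattern on its own (the worker and the timings are placeholders, not kaspad code):

package main

import (
	"fmt"
	"time"
)

// worker busy-loops, like solveBlock iterating nonces, until stop is closed.
func worker(stop chan struct{}, stopped chan<- uint64) {
	var iterations uint64
	for {
		select {
		case <-stop:
			stopped <- iterations
			return
		default:
			iterations++ // one unit of work, e.g. one nonce attempt
		}
	}
}

func main() {
	stop := make(chan struct{})
	stopped := make(chan uint64)
	go worker(stop, stopped)

	time.Sleep(10 * time.Millisecond) // let the worker spin for a bit
	close(stop)                       // cancel it, as solveLoop does when a new template arrives
	fmt.Printf("worker stopped after %d iterations\n", <-stopped)
}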
|
||||
|
||||
@@ -1,32 +0,0 @@
package templatemanager

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"sync"
)

var currentTemplate *externalapi.DomainBlock
var isSynced bool
var lock = &sync.Mutex{}

// Get returns the template to work on
func Get() (*externalapi.DomainBlock, bool) {
	lock.Lock()
	defer lock.Unlock()
	// Shallow copy the block so when the user replaces the header it won't affect the template here.
	if currentTemplate == nil {
		return nil, false
	}
	block := *currentTemplate
	return &block, isSynced
}

// Set sets the current template to work on
func Set(template *appmessage.GetBlockTemplateResponseMessage) {
	block := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
	lock.Lock()
	defer lock.Unlock()
	currentTemplate = block
	isSynced = template.IsSynced
}
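
templatemanager.Get above hands out a shallow copy of the stored block, so a caller that replaces the copy's Header (as the solver does) never touches the shared template. A toy illustration of what the shallow copy does and does not protect against; the types here are simplified stand-ins, not the externalapi ones:

package main

import "fmt"

type header struct{ nonce uint64 }
type block struct{ Header *header }

var currentTemplate = &block{Header: &header{nonce: 1}}

// get returns a shallow copy of the stored block, like templatemanager.Get does.
func get() *block {
	copied := *currentTemplate
	return &copied
}

func main() {
	mined := get()
	mined.Header = &header{nonce: 42} // replace the header on the copy

	// The stored template still points at the original header.
	fmt.Println(currentTemplate.Header.nonce) // prints 1
}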
@@ -10,7 +10,7 @@ It is capable of generating wallet key-pairs, printing a wallet's current balanc

## Requirements

Go 1.16 or later.
Go 1.14 or later.

## Installation

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.14-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -16,12 +16,9 @@ RUN go get -u golang.org/x/lint/golint \
COPY go.mod .
COPY go.sum .

# Cache kaspad dependencies
RUN go mod download

COPY . .

RUN ./build_and_test.sh
RUN NO_PARALLEL=1 ./build_and_test.sh

# --- multistage docker build: stage #2: runtime image
FROM alpine
@@ -30,7 +27,7 @@ WORKDIR /app
RUN apk add --no-cache ca-certificates tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/kaspad /app/
COPY --from=build /go/src/github.com/kaspanet/kaspad/infrastructure/config/sample-kaspad.conf /app/
COPY --from=build /go/src/github.com/kaspanet/kaspad/sample-kaspad.conf /app/

USER nobody
ENTRYPOINT [ "/sbin/tini", "--" ]
@@ -157,15 +157,6 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
|
||||
return blockInfo, nil
|
||||
}
|
||||
|
||||
func (s *consensus) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
blockRelation, err := s.blockRelationStore.BlockRelation(s.databaseContext, blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blockRelation.Children, nil
|
||||
}
|
||||
|
||||
func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
@@ -232,30 +223,6 @@ func (s *consensus) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.D
|
||||
return pruningPointUTXOs, nil
|
||||
}
|
||||
|
||||
func (s *consensus) GetVirtualUTXOs(expectedVirtualParents []*externalapi.DomainHash,
|
||||
fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
virtualParents, err := s.dagTopologyManager.Parents(model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !externalapi.HashesEqual(expectedVirtualParents, virtualParents) {
|
||||
return nil, errors.Wrapf(ruleerrors.ErrGetVirtualUTXOsWrongVirtualParents, "expected virtual parents %s but got %s",
|
||||
expectedVirtualParents,
|
||||
virtualParents)
|
||||
}
|
||||
|
||||
virtualUTXOs, err := s.consensusStateStore.VirtualUTXOs(s.databaseContext, fromOutpoint, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return virtualUTXOs, nil
|
||||
}
|
||||
|
||||
func (s *consensus) PruningPoint() (*externalapi.DomainHash, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
@@ -436,15 +403,3 @@ func (s *consensus) GetHeadersSelectedTip() (*externalapi.DomainHash, error) {
|
||||
|
||||
return s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext)
|
||||
}
|
||||
|
||||
func (s *consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
err := s.validateBlockHashExists(blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s.dagTraversalManager.Anticone(blockHash)
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GHOSTDAGManagerConstructor is the function signature for a constructor of a type implementing model.GHOSTDAGManager
|
||||
type GHOSTDAGManagerConstructor func(model.DBReader, model.DAGTopologyManager,
|
||||
model.GHOSTDAGDataStore, model.BlockHeaderStore, model.KType) model.GHOSTDAGManager
|
||||
|
||||
// DifficultyManagerConstructor is the function signature for a constructor of a type implementing model.DifficultyManager
|
||||
type DifficultyManagerConstructor func(model.DBReader, model.GHOSTDAGManager, model.GHOSTDAGDataStore,
|
||||
model.BlockHeaderStore, model.DAGTopologyManager, model.DAGTraversalManager, *big.Int, int, bool, time.Duration,
|
||||
*externalapi.DomainHash) model.DifficultyManager
|
||||
|
||||
// PastMedianTimeManagerConstructor is the function signature for a constructor of a type implementing model.PastMedianTimeManager
|
||||
type PastMedianTimeManagerConstructor func(int, model.DBReader, model.DAGTraversalManager, model.BlockHeaderStore,
|
||||
model.GHOSTDAGDataStore) model.PastMedianTimeManager
|
||||
@@ -0,0 +1,52 @@
package binaryserialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
	"github.com/kaspanet/kaspad/util/binaryserializer"
	"io"
)

// SerializeUTXOCollection serializes the given utxoCollection into the given writer
func SerializeUTXOCollection(writer io.Writer, utxoCollection model.UTXOCollection) error {
	length := uint64(utxoCollection.Len())
	err := binaryserializer.PutUint64(writer, length)
	if err != nil {
		return err
	}

	utxoIterator := utxoCollection.Iterator()
	for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
		outpoint, utxoEntry, err := utxoIterator.Get()
		if err != nil {
			return err
		}
		err = utxo.SerializeUTXOIntoWriter(writer, utxoEntry, outpoint)
		if err != nil {
			return err
		}
	}

	return nil
}

// DeserializeUTXOCollection deserializes a utxoCollection out of the given reader
func DeserializeUTXOCollection(reader io.Reader) (model.UTXOCollection, error) {
	length, err := binaryserializer.Uint64(reader)
	if err != nil {
		return nil, err
	}

	utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, length)
	for i := uint64(0); i < length; i++ {
		utxoEntry, outpoint, err := utxo.DeserializeUTXOOutOfReader(reader)
		if err != nil {
			return nil, err
		}
		utxoMap[*outpoint] = utxoEntry
	}

	utxoCollection := utxo.NewUTXOCollection(utxoMap)
	return utxoCollection, nil
}
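
A quick way to exercise the wire format above is to round-trip a collection through a bytes.Buffer. This is an illustrative sketch, not a test that exists in the repository; it only uses the helpers introduced in this file:

package binaryserialization_test

import (
	"bytes"
	"testing"

	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

// TestUTXOCollectionRoundTrip serializes an (empty) UTXO collection into a
// buffer and reads it back, checking that the entry count survives the trip.
func TestUTXOCollectionRoundTrip(t *testing.T) {
	collection := utxo.NewUTXOCollection(map[externalapi.DomainOutpoint]externalapi.UTXOEntry{})

	var buffer bytes.Buffer
	if err := binaryserialization.SerializeUTXOCollection(&buffer, collection); err != nil {
		t.Fatalf("SerializeUTXOCollection: %+v", err)
	}

	deserialized, err := binaryserialization.DeserializeUTXOCollection(&buffer)
	if err != nil {
		t.Fatalf("DeserializeUTXOCollection: %+v", err)
	}
	if deserialized.Len() != collection.Len() {
		t.Fatalf("expected %d entries, got %d", collection.Len(), deserialized.Len())
	}
}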
29
domain/consensus/database/binaryserialization/utxo_diff.go
Normal file
@@ -0,0 +1,29 @@
package binaryserialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
	"io"
)

// SerializeUTXODiff serializes the given utxoDiff into the given writer
func SerializeUTXODiff(writer io.Writer, utxoDiff model.UTXODiff) error {
	err := SerializeUTXOCollection(writer, utxoDiff.ToAdd())
	if err != nil {
		return err
	}
	return SerializeUTXOCollection(writer, utxoDiff.ToRemove())
}

// DeserializeUTXODiff deserializes a utxoDiff out of the given reader
func DeserializeUTXODiff(reader io.Reader) (model.UTXODiff, error) {
	toAdd, err := DeserializeUTXOCollection(reader)
	if err != nil {
		return nil, err
	}
	toRemove, err := DeserializeUTXOCollection(reader)
	if err != nil {
		return nil, err
	}
	return utxo.NewUTXODiffFromCollections(toAdd, toRemove)
}
@@ -3,40 +3,25 @@ package database
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type dbCursor struct {
|
||||
cursor database.Cursor
|
||||
isClosed bool
|
||||
cursor database.Cursor
|
||||
}
|
||||
|
||||
func (d dbCursor) Next() bool {
|
||||
if d.isClosed {
|
||||
panic("Tried using a closed DBCursor")
|
||||
}
|
||||
|
||||
return d.cursor.Next()
|
||||
}
|
||||
|
||||
func (d dbCursor) First() bool {
|
||||
if d.isClosed {
|
||||
panic("Tried using a closed DBCursor")
|
||||
}
|
||||
return d.cursor.First()
|
||||
}
|
||||
|
||||
func (d dbCursor) Seek(key model.DBKey) error {
|
||||
if d.isClosed {
|
||||
return errors.New("Tried using a closed DBCursor")
|
||||
}
|
||||
return d.cursor.Seek(dbKeyToDatabaseKey(key))
|
||||
}
|
||||
|
||||
func (d dbCursor) Key() (model.DBKey, error) {
|
||||
if d.isClosed {
|
||||
return nil, errors.New("Tried using a closed DBCursor")
|
||||
}
|
||||
key, err := d.cursor.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -46,23 +31,11 @@ func (d dbCursor) Key() (model.DBKey, error) {
|
||||
}
|
||||
|
||||
func (d dbCursor) Value() ([]byte, error) {
|
||||
if d.isClosed {
|
||||
return nil, errors.New("Tried using a closed DBCursor")
|
||||
}
|
||||
return d.cursor.Value()
|
||||
}
|
||||
|
||||
func (d dbCursor) Close() error {
|
||||
if d.isClosed {
|
||||
return errors.New("Tried using a closed DBCursor")
|
||||
}
|
||||
d.isClosed = true
|
||||
err := d.cursor.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.cursor = nil
|
||||
return nil
|
||||
return d.cursor.Close()
|
||||
}
|
||||
|
||||
func newDBCursor(cursor database.Cursor) model.DBCursor {
|
||||
|
||||
@@ -1473,6 +1473,100 @@ func (x *DbUtxoDiff) GetToRemove() []*DbUtxoCollectionItem {
|
||||
return nil
|
||||
}
|
||||
|
||||
type DbPruningPointUTXOSetBytes struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DbPruningPointUTXOSetBytes) Reset() {
|
||||
*x = DbPruningPointUTXOSetBytes{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_dbobjects_proto_msgTypes[24]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DbPruningPointUTXOSetBytes) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DbPruningPointUTXOSetBytes) ProtoMessage() {}
|
||||
|
||||
func (x *DbPruningPointUTXOSetBytes) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_dbobjects_proto_msgTypes[24]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DbPruningPointUTXOSetBytes.ProtoReflect.Descriptor instead.
|
||||
func (*DbPruningPointUTXOSetBytes) Descriptor() ([]byte, []int) {
|
||||
return file_dbobjects_proto_rawDescGZIP(), []int{24}
|
||||
}
|
||||
|
||||
func (x *DbPruningPointUTXOSetBytes) GetBytes() []byte {
|
||||
if x != nil {
|
||||
return x.Bytes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DbHeaderTips struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Tips []*DbHash `protobuf:"bytes,1,rep,name=tips,proto3" json:"tips,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DbHeaderTips) Reset() {
|
||||
*x = DbHeaderTips{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_dbobjects_proto_msgTypes[25]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DbHeaderTips) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DbHeaderTips) ProtoMessage() {}
|
||||
|
||||
func (x *DbHeaderTips) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_dbobjects_proto_msgTypes[25]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DbHeaderTips.ProtoReflect.Descriptor instead.
|
||||
func (*DbHeaderTips) Descriptor() ([]byte, []int) {
|
||||
return file_dbobjects_proto_rawDescGZIP(), []int{25}
|
||||
}
|
||||
|
||||
func (x *DbHeaderTips) GetTips() []*DbHash {
|
||||
if x != nil {
|
||||
return x.Tips
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DbTips struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -1484,7 +1578,7 @@ type DbTips struct {
func (x *DbTips) Reset() {
	*x = DbTips{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dbobjects_proto_msgTypes[24]
		mi := &file_dbobjects_proto_msgTypes[26]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
@@ -1497,7 +1591,7 @@ func (x *DbTips) String() string {
func (*DbTips) ProtoMessage() {}

func (x *DbTips) ProtoReflect() protoreflect.Message {
	mi := &file_dbobjects_proto_msgTypes[24]
	mi := &file_dbobjects_proto_msgTypes[26]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
@@ -1510,7 +1604,7 @@ func (x *DbTips) ProtoReflect() protoreflect.Message {

// Deprecated: Use DbTips.ProtoReflect.Descriptor instead.
func (*DbTips) Descriptor() ([]byte, []int) {
	return file_dbobjects_proto_rawDescGZIP(), []int{24}
	return file_dbobjects_proto_rawDescGZIP(), []int{26}
}

func (x *DbTips) GetTips() []*DbHash {
@@ -1520,6 +1614,53 @@ func (x *DbTips) GetTips() []*DbHash {
	return nil
}

type DbVirtualDiffParents struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	VirtualDiffParents []*DbHash `protobuf:"bytes,1,rep,name=virtualDiffParents,proto3" json:"virtualDiffParents,omitempty"`
}

func (x *DbVirtualDiffParents) Reset() {
	*x = DbVirtualDiffParents{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dbobjects_proto_msgTypes[27]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DbVirtualDiffParents) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DbVirtualDiffParents) ProtoMessage() {}

func (x *DbVirtualDiffParents) ProtoReflect() protoreflect.Message {
	mi := &file_dbobjects_proto_msgTypes[27]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DbVirtualDiffParents.ProtoReflect.Descriptor instead.
func (*DbVirtualDiffParents) Descriptor() ([]byte, []int) {
	return file_dbobjects_proto_rawDescGZIP(), []int{27}
}

func (x *DbVirtualDiffParents) GetVirtualDiffParents() []*DbHash {
	if x != nil {
		return x.VirtualDiffParents
	}
	return nil
}

type DbBlockCount struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -1531,7 +1672,7 @@ type DbBlockCount struct {
func (x *DbBlockCount) Reset() {
	*x = DbBlockCount{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dbobjects_proto_msgTypes[25]
		mi := &file_dbobjects_proto_msgTypes[28]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
@@ -1544,7 +1685,7 @@ func (x *DbBlockCount) String() string {
func (*DbBlockCount) ProtoMessage() {}

func (x *DbBlockCount) ProtoReflect() protoreflect.Message {
	mi := &file_dbobjects_proto_msgTypes[25]
	mi := &file_dbobjects_proto_msgTypes[28]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
@@ -1557,7 +1698,7 @@ func (x *DbBlockCount) ProtoReflect() protoreflect.Message {

// Deprecated: Use DbBlockCount.ProtoReflect.Descriptor instead.
func (*DbBlockCount) Descriptor() ([]byte, []int) {
	return file_dbobjects_proto_rawDescGZIP(), []int{25}
	return file_dbobjects_proto_rawDescGZIP(), []int{28}
}

func (x *DbBlockCount) GetCount() uint64 {
@@ -1578,7 +1719,7 @@ type DbBlockHeaderCount struct {
func (x *DbBlockHeaderCount) Reset() {
	*x = DbBlockHeaderCount{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dbobjects_proto_msgTypes[26]
		mi := &file_dbobjects_proto_msgTypes[29]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
@@ -1591,7 +1732,7 @@ func (x *DbBlockHeaderCount) String() string {
func (*DbBlockHeaderCount) ProtoMessage() {}

func (x *DbBlockHeaderCount) ProtoReflect() protoreflect.Message {
	mi := &file_dbobjects_proto_msgTypes[26]
	mi := &file_dbobjects_proto_msgTypes[29]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
@@ -1604,7 +1745,7 @@ func (x *DbBlockHeaderCount) ProtoReflect() protoreflect.Message {

// Deprecated: Use DbBlockHeaderCount.ProtoReflect.Descriptor instead.
func (*DbBlockHeaderCount) Descriptor() ([]byte, []int) {
	return file_dbobjects_proto_rawDescGZIP(), []int{26}
	return file_dbobjects_proto_rawDescGZIP(), []int{29}
}

func (x *DbBlockHeaderCount) GetCount() uint64 {
@@ -1840,19 +1981,32 @@ var file_dbobjects_proto_rawDesc = []byte{
	0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73,
	0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55,
	0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65,
	0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x33, 0x0a, 0x06, 0x44,
	0x62, 0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20,
	0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x44,
	0x62, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58,
	0x4f, 0x53, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74,
	0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22,
	0x39, 0x0a, 0x0c, 0x44, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x69, 0x70, 0x73, 0x12,
	0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
	0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62,
	0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22, 0x33, 0x0a, 0x06, 0x44, 0x62,
	0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03,
	0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
	0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22,
	0x5d, 0x0a, 0x14, 0x44, 0x62, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66,
	0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x12, 0x76, 0x69, 0x72, 0x74, 0x75,
	0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
	0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
	0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73,
	0x22, 0x24, 0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74,
	0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
	0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63,
	0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05,
	0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75,
	0x6e, 0x74, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
	0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64,
	0x2f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
	0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x76, 0x69, 0x72, 0x74,
	0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x24,
	0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14,
	0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63,
	0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
	0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
	0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
	0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b,
	0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x73,
	0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -1867,7 +2021,7 @@ func file_dbobjects_proto_rawDescGZIP() []byte {
	return file_dbobjects_proto_rawDescData
}

var file_dbobjects_proto_msgTypes = make([]protoimpl.MessageInfo, 27)
var file_dbobjects_proto_msgTypes = make([]protoimpl.MessageInfo, 30)
var file_dbobjects_proto_goTypes = []interface{}{
	(*DbBlock)(nil),       // 0: serialization.DbBlock
	(*DbBlockHeader)(nil), // 1: serialization.DbBlockHeader
@@ -1893,9 +2047,12 @@ var file_dbobjects_proto_goTypes = []interface{}{
	(*DbReachabilityData)(nil),     // 21: serialization.DbReachabilityData
	(*DbReachabilityInterval)(nil), // 22: serialization.DbReachabilityInterval
	(*DbUtxoDiff)(nil),             // 23: serialization.DbUtxoDiff
	(*DbTips)(nil),                 // 24: serialization.DbTips
	(*DbBlockCount)(nil),           // 25: serialization.DbBlockCount
	(*DbBlockHeaderCount)(nil),     // 26: serialization.DbBlockHeaderCount
	(*DbPruningPointUTXOSetBytes)(nil), // 24: serialization.DbPruningPointUTXOSetBytes
	(*DbHeaderTips)(nil),               // 25: serialization.DbHeaderTips
	(*DbTips)(nil),                     // 26: serialization.DbTips
	(*DbVirtualDiffParents)(nil),       // 27: serialization.DbVirtualDiffParents
	(*DbBlockCount)(nil),               // 28: serialization.DbBlockCount
	(*DbBlockHeaderCount)(nil),         // 29: serialization.DbBlockHeaderCount
}
var file_dbobjects_proto_depIdxs = []int32{
	1, // 0: serialization.DbBlock.header:type_name -> serialization.DbBlockHeader
@@ -1933,12 +2090,14 @@ var file_dbobjects_proto_depIdxs = []int32{
	2,  // 32: serialization.DbReachabilityData.futureCoveringSet:type_name -> serialization.DbHash
	18, // 33: serialization.DbUtxoDiff.toAdd:type_name -> serialization.DbUtxoCollectionItem
	18, // 34: serialization.DbUtxoDiff.toRemove:type_name -> serialization.DbUtxoCollectionItem
	2,  // 35: serialization.DbTips.tips:type_name -> serialization.DbHash
	36, // [36:36] is the sub-list for method output_type
	36, // [36:36] is the sub-list for method input_type
	36, // [36:36] is the sub-list for extension type_name
	36, // [36:36] is the sub-list for extension extendee
	0,  // [0:36] is the sub-list for field type_name
	2,  // 35: serialization.DbHeaderTips.tips:type_name -> serialization.DbHash
	2,  // 36: serialization.DbTips.tips:type_name -> serialization.DbHash
	2,  // 37: serialization.DbVirtualDiffParents.virtualDiffParents:type_name -> serialization.DbHash
	38, // [38:38] is the sub-list for method output_type
	38, // [38:38] is the sub-list for method input_type
	38, // [38:38] is the sub-list for extension type_name
	38, // [38:38] is the sub-list for extension extendee
	0,  // [0:38] is the sub-list for field type_name
}

func init() { file_dbobjects_proto_init() }
@@ -2236,7 +2395,7 @@ func file_dbobjects_proto_init() {
		}
	}
	file_dbobjects_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbTips); i {
		switch v := v.(*DbPruningPointUTXOSetBytes); i {
		case 0:
			return &v.state
		case 1:
@@ -2248,7 +2407,7 @@ func file_dbobjects_proto_init() {
		}
	}
	file_dbobjects_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbBlockCount); i {
		switch v := v.(*DbHeaderTips); i {
		case 0:
			return &v.state
		case 1:
@@ -2260,6 +2419,42 @@ func file_dbobjects_proto_init() {
		}
	}
	file_dbobjects_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbTips); i {
		case 0:
			return &v.state
		case 1:
			return &v.sizeCache
		case 2:
			return &v.unknownFields
		default:
			return nil
		}
	}
	file_dbobjects_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbVirtualDiffParents); i {
		case 0:
			return &v.state
		case 1:
			return &v.sizeCache
		case 2:
			return &v.unknownFields
		default:
			return nil
		}
	}
	file_dbobjects_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbBlockCount); i {
		case 0:
			return &v.state
		case 1:
			return &v.sizeCache
		case 2:
			return &v.unknownFields
		default:
			return nil
		}
	}
	file_dbobjects_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
		switch v := v.(*DbBlockHeaderCount); i {
		case 0:
			return &v.state
@@ -2278,7 +2473,7 @@ func file_dbobjects_proto_init() {
		GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
		RawDescriptor: file_dbobjects_proto_rawDesc,
		NumEnums:      0,
		NumMessages:   27,
		NumMessages:   30,
		NumExtensions: 0,
		NumServices:   0,
	},

@@ -139,10 +139,22 @@ message DbUtxoDiff {
	repeated DbUtxoCollectionItem toRemove = 2;
}

message DbPruningPointUTXOSetBytes {
	bytes bytes = 1;
}

message DbHeaderTips {
	repeated DbHash tips = 1;
}

message DbTips {
	repeated DbHash tips = 1;
}

message DbVirtualDiffParents {
	repeated DbHash virtualDiffParents = 1;
}

message DbBlockCount {
	uint64 count = 1;
}

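These are plain value messages, so a round trip through the standard protobuf runtime is enough to exercise them. A minimal sketch, assuming the generated package is imported under its path from this diff (github.com/kaspanet/kaspad/domain/consensus/database/serialization) and using illustrative byte values:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
)

func main() {
	// Wrap an (illustrative) serialized UTXO-set chunk in the new message type.
	original := &serialization.DbPruningPointUTXOSetBytes{Bytes: []byte{0x01, 0x02, 0x03}}

	data, err := proto.Marshal(original)
	if err != nil {
		log.Fatal(err)
	}

	decoded := &serialization.DbPruningPointUTXOSetBytes{}
	if err := proto.Unmarshal(data, decoded); err != nil {
		log.Fatal(err)
	}

	fmt.Println(decoded.GetBytes()) // [1 2 3]
}
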
17
domain/consensus/database/serialization/header_tips.go
Normal file
@@ -0,0 +1,17 @@
package serialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// HeaderTipsToDBHeaderTips converts a slice of hashes to DbHeaderTips
func HeaderTipsToDBHeaderTips(tips []*externalapi.DomainHash) *DbHeaderTips {
	return &DbHeaderTips{
		Tips: DomainHashesToDbHashes(tips),
	}
}

// DBHeaderTipsToHeaderTips converts DbHeaderTips to a slice of hashes
func DBHeaderTipsToHeaderTips(dbHeaderTips *DbHeaderTips) ([]*externalapi.DomainHash, error) {
	return DbHashesToDomainHashes(dbHeaderTips.Tips)
}

@@ -1,15 +1,15 @@
package serialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

func utxoCollectionToDBUTXOCollection(utxoCollection externalapi.UTXOCollection) ([]*DbUtxoCollectionItem, error) {
func utxoCollectionToDBUTXOCollection(utxoCollection model.UTXOCollection) ([]*DbUtxoCollectionItem, error) {
	items := make([]*DbUtxoCollectionItem, utxoCollection.Len())
	i := 0
	utxoIterator := utxoCollection.Iterator()
	defer utxoIterator.Close()
	for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
		outpoint, entry, err := utxoIterator.Get()
		if err != nil {
@@ -26,7 +26,7 @@ func utxoCollectionToDBUTXOCollection(utxoCollection externalapi.UTXOCollection)
	return items, nil
}

func dbUTXOCollectionToUTXOCollection(items []*DbUtxoCollectionItem) (externalapi.UTXOCollection, error) {
func dbUTXOCollectionToUTXOCollection(items []*DbUtxoCollectionItem) (model.UTXOCollection, error) {
	utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, len(items))
	for _, item := range items {
		outpoint, err := DbOutpointToDomainOutpoint(item.Outpoint)

@@ -1,12 +1,12 @@
package serialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

// UTXODiffToDBUTXODiff converts UTXODiff to DbUtxoDiff
func UTXODiffToDBUTXODiff(diff externalapi.UTXODiff) (*DbUtxoDiff, error) {
func UTXODiffToDBUTXODiff(diff model.UTXODiff) (*DbUtxoDiff, error) {
	toAdd, err := utxoCollectionToDBUTXOCollection(diff.ToAdd())
	if err != nil {
		return nil, err
@@ -24,7 +24,7 @@ func UTXODiffToDBUTXODiff(diff externalapi.UTXODiff) (*DbUtxoDiff, error) {
}

// DBUTXODiffToUTXODiff converts DbUtxoDiff to UTXODiff
func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (externalapi.UTXODiff, error) {
func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (model.UTXODiff, error) {
	toAdd, err := dbUTXOCollectionToUTXOCollection(diff.ToAdd)
	if err != nil {
		return nil, err

@@ -0,0 +1,17 @@
package serialization

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// VirtualDiffParentsToDBHeaderVirtualDiffParents converts a slice of hashes to DbVirtualDiffParents
func VirtualDiffParentsToDBHeaderVirtualDiffParents(tips []*externalapi.DomainHash) *DbVirtualDiffParents {
	return &DbVirtualDiffParents{
		VirtualDiffParents: DomainHashesToDbHashes(tips),
	}
}

// DBVirtualDiffParentsToVirtualDiffParents converts DbVirtualDiffParents to a slice of hashes
func DBVirtualDiffParentsToVirtualDiffParents(dbVirtualDiffParents *DbVirtualDiffParents) ([]*externalapi.DomainHash, error) {
	return DbHashesToDomainHashes(dbVirtualDiffParents.VirtualDiffParents)
}

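A short sketch of how the two helpers above compose (the all-zero 32-byte hash is illustrative; DomainHashesToDbHashes and DbHashesToDomainHashes are the existing converters they delegate to):

// Sketch only: round-trip a single hash through the DB form.
hash, err := externalapi.NewDomainHashFromByteSlice(make([]byte, 32))
if err != nil {
	return err
}

dbVirtualDiffParents := VirtualDiffParentsToDBHeaderVirtualDiffParents(
	[]*externalapi.DomainHash{hash})

virtualDiffParents, err := DBVirtualDiffParentsToVirtualDiffParents(dbVirtualDiffParents)
if err != nil {
	return err
}
_ = virtualDiffParents // holds the same single hash that went in
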
@@ -19,11 +19,11 @@ type acceptanceDataStore struct {
}

// New instantiates a new AcceptanceDataStore
func New(cacheSize int, preallocate bool) model.AcceptanceDataStore {
func New(cacheSize int) model.AcceptanceDataStore {
	return &acceptanceDataStore{
		staging:  make(map[externalapi.DomainHash]externalapi.AcceptanceData),
		toDelete: make(map[externalapi.DomainHash]struct{}),
		cache:    lrucache.New(cacheSize, preallocate),
		cache:    lrucache.New(cacheSize),
	}
}


@@ -21,11 +21,11 @@ type blockHeaderStore struct {
}

// New instantiates a new BlockHeaderStore
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
func New(dbContext model.DBReader, cacheSize int) (model.BlockHeaderStore, error) {
	blockHeaderStore := &blockHeaderStore{
		staging:  make(map[externalapi.DomainHash]externalapi.BlockHeader),
		toDelete: make(map[externalapi.DomainHash]struct{}),
		cache:    lrucache.New(cacheSize, preallocate),
		cache:    lrucache.New(cacheSize),
	}

	err := blockHeaderStore.initializeCount(dbContext)

@@ -18,10 +18,10 @@ type blockRelationStore struct {
}

// New instantiates a new BlockRelationStore
func New(cacheSize int, preallocate bool) model.BlockRelationStore {
func New(cacheSize int) model.BlockRelationStore {
	return &blockRelationStore{
		staging: make(map[externalapi.DomainHash]*model.BlockRelations),
		cache:   lrucache.New(cacheSize, preallocate),
		cache:   lrucache.New(cacheSize),
	}
}


@@ -18,10 +18,10 @@ type blockStatusStore struct {
}

// New instantiates a new BlockStatusStore
func New(cacheSize int, preallocate bool) model.BlockStatusStore {
func New(cacheSize int) model.BlockStatusStore {
	return &blockStatusStore{
		staging: make(map[externalapi.DomainHash]externalapi.BlockStatus),
		cache:   lrucache.New(cacheSize, preallocate),
		cache:   lrucache.New(cacheSize),
	}
}


@@ -7,7 +7,6 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
	"github.com/pkg/errors"
)

var bucket = database.MakeBucket([]byte("blocks"))
@@ -22,11 +21,11 @@ type blockStore struct {
}

// New instantiates a new BlockStore
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockStore, error) {
func New(dbContext model.DBReader, cacheSize int) (model.BlockStore, error) {
	blockStore := &blockStore{
		staging:  make(map[externalapi.DomainHash]*externalapi.DomainBlock),
		toDelete: make(map[externalapi.DomainHash]struct{}),
		cache:    lrucache.New(cacheSize, preallocate),
		cache:    lrucache.New(cacheSize),
	}

	err := blockStore.initializeCount(dbContext)
@@ -213,57 +212,3 @@ func (bs *blockStore) serializeBlockCount(count uint64) ([]byte, error) {
	dbBlockCount := &serialization.DbBlockCount{Count: count}
	return proto.Marshal(dbBlockCount)
}

type allBlockHashesIterator struct {
	cursor   model.DBCursor
	isClosed bool
}

func (a allBlockHashesIterator) First() bool {
	if a.isClosed {
		panic("Tried using a closed AllBlockHashesIterator")
	}
	return a.cursor.First()
}

func (a allBlockHashesIterator) Next() bool {
	if a.isClosed {
		panic("Tried using a closed AllBlockHashesIterator")
	}
	return a.cursor.Next()
}

func (a allBlockHashesIterator) Get() (*externalapi.DomainHash, error) {
	if a.isClosed {
		return nil, errors.New("Tried using a closed AllBlockHashesIterator")
	}
	key, err := a.cursor.Key()
	if err != nil {
		return nil, err
	}

	blockHashBytes := key.Suffix()
	return externalapi.NewDomainHashFromByteSlice(blockHashBytes)
}

func (a allBlockHashesIterator) Close() error {
	if a.isClosed {
		return errors.New("Tried using a closed AllBlockHashesIterator")
	}
	a.isClosed = true
	err := a.cursor.Close()
	if err != nil {
		return err
	}
	a.cursor = nil
	return nil
}

func (bs *blockStore) AllBlockHashesIterator(dbContext model.DBReader) (model.BlockIterator, error) {
	cursor, err := dbContext.Cursor(bucket)
	if err != nil {
		return nil, err
	}

	return &allBlockHashesIterator{cursor: cursor}, nil
}

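The allBlockHashesIterator in this hunk follows the First/Next/Get/Close contract used by the other iterators in this diff. A hedged usage sketch (bs and dbContext stand for an existing blockStore and model.DBReader; the surrounding function is illustrative, not part of this change):

// Sketch only: walk every stored block hash through the iterator shown above.
iterator, err := bs.AllBlockHashesIterator(dbContext)
if err != nil {
	return err
}
defer iterator.Close() // a second Close returns an error, per the guard above

for ok := iterator.First(); ok; ok = iterator.Next() {
	blockHash, err := iterator.Get()
	if err != nil {
		return err
	}
	fmt.Println(blockHash) // process the hash
}
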
@@ -8,24 +8,27 @@ import (

// consensusStateStore represents a store for the current consensus state
type consensusStateStore struct {
	tipsStaging            []*externalapi.DomainHash
	virtualUTXODiffStaging externalapi.UTXODiff
	tipsStaging               []*externalapi.DomainHash
	virtualDiffParentsStaging []*externalapi.DomainHash
	virtualUTXODiffStaging    model.UTXODiff

	virtualUTXOSetCache *utxolrucache.LRUCache

	tipsCache []*externalapi.DomainHash
	tipsCache               []*externalapi.DomainHash
	virtualDiffParentsCache []*externalapi.DomainHash
}

// New instantiates a new ConsensusStateStore
func New(utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
func New(utxoSetCacheSize int) model.ConsensusStateStore {
	return &consensusStateStore{
		virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize, preallocate),
		virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize),
	}
}

func (css *consensusStateStore) Discard() {
	css.tipsStaging = nil
	css.virtualUTXODiffStaging = nil
	css.virtualDiffParentsStaging = nil
}

func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
@@ -33,6 +36,10 @@ func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
	if err != nil {
		return err
	}
	err = css.commitVirtualDiffParents(dbTx)
	if err != nil {
		return err
	}

	err = css.commitVirtualUTXODiff(dbTx)
	if err != nil {
@@ -46,5 +53,6 @@ func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {

func (css *consensusStateStore) IsStaged() bool {
	return css.tipsStaging != nil ||
		css.virtualDiffParentsStaging != nil ||
		css.virtualUTXODiffStaging != nil
}

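Like the other consensus stores in this diff, consensusStateStore follows a stage-then-commit lifecycle: the *Staging fields hold pending state, IsStaged reports whether anything is pending, Commit flushes the staged data inside a database transaction, and Discard drops it. A heavily hedged sketch of that lifecycle from a caller's point of view (db.Begin and dbTx.Commit are assumed transaction helpers and do not appear in this excerpt):

// Sketch only: flush whatever the consensus state store has staged.
if css.IsStaged() {
	dbTx, err := db.Begin() // assumed helper for opening a model.DBTransaction
	if err != nil {
		return err
	}
	if err := css.Commit(dbTx); err != nil {
		css.Discard()
		return err
	}
	if err := dbTx.Commit(); err != nil { // assumed transaction commit
		return err
	}
}
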
Some files were not shown because too many files have changed in this diff.