Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 11:17:05 +00:00.

Compare view spanning 133 commits.
.github/workflows/deploy.yaml (7 changed lines, vendored)

@@ -19,16 +19,11 @@ jobs:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v2

-      # Increase the pagefile size on Windows to aviod running out of memory
-      - name: Increase pagefile size on Windows
-        if: runner.os == 'Windows'
-        run: powershell -command .github\workflows\SetPageFileSize.ps1
-
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       - name: Build on Linux
         if: runner.os == 'Linux'
.github/workflows/race.yaml (2 changed lines, vendored)

@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       - name: Set scheduled branch name
         shell: bash
.github/workflows/tests.yaml (6 changed lines, vendored)

@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules

@@ -58,7 +58,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       - name: Checkout
         uses: actions/checkout@v2

@@ -86,7 +86,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18

       - name: Delete the stability tests from coverage
         run: rm -r stability-tests
@@ -7,15 +7,13 @@ Kaspad

 Kaspad is the reference full node Kaspa implementation written in Go (golang).

 This project is currently under active development and is in Beta state.

 ## What is kaspa

 Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations and sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a generalization of Nakamoto consensus.

 ## Requirements

-Go 1.16 or later.
+Go 1.18 or later.

 ## Installation
@@ -69,6 +69,10 @@ const (
 	CmdReady
 	CmdTrustedData
 	CmdBlockWithTrustedDataV4
+	CmdRequestNextPruningPointAndItsAnticoneBlocks
+	CmdRequestIBDChainBlockLocator
+	CmdIBDChainBlockLocator
+	CmdRequestAnticone

 	// rpc
 	CmdGetCurrentNetworkRequestMessage
@@ -152,6 +156,13 @@ const (
 	CmdVirtualDaaScoreChangedNotificationMessage
 	CmdGetBalancesByAddressesRequestMessage
 	CmdGetBalancesByAddressesResponseMessage
+	CmdNotifyNewBlockTemplateRequestMessage
+	CmdNotifyNewBlockTemplateResponseMessage
+	CmdNewBlockTemplateNotificationMessage
+	CmdGetMempoolEntriesByAddressesRequestMessage
+	CmdGetMempoolEntriesByAddressesResponseMessage
+	CmdGetCoinSupplyRequestMessage
+	CmdGetCoinSupplyResponseMessage
 )

 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -195,6 +206,10 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdReady:                  "Ready",
 	CmdTrustedData:            "TrustedData",
 	CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
+	CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
+	CmdRequestIBDChainBlockLocator:                 "RequestIBDChainBlockLocator",
+	CmdIBDChainBlockLocator:                        "IBDChainBlockLocator",
+	CmdRequestAnticone:                             "RequestAnticone",
 }

 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -278,6 +293,13 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdVirtualDaaScoreChangedNotificationMessage:  "VirtualDaaScoreChangedNotification",
 	CmdGetBalancesByAddressesRequestMessage:       "GetBalancesByAddressesRequest",
 	CmdGetBalancesByAddressesResponseMessage:      "GetBalancesByAddressesResponse",
+	CmdNotifyNewBlockTemplateRequestMessage:        "NotifyNewBlockTemplateRequest",
+	CmdNotifyNewBlockTemplateResponseMessage:       "NotifyNewBlockTemplateResponse",
+	CmdNewBlockTemplateNotificationMessage:         "NewBlockTemplateNotification",
+	CmdGetMempoolEntriesByAddressesRequestMessage:  "GetMempoolEntriesByAddressesRequest",
+	CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
+	CmdGetCoinSupplyRequestMessage:                 "GetCoinSupplyRequest",
+	CmdGetCoinSupplyResponseMessage:                "GetCoinSupplyResponse",
 }

 // Message is an interface that describes a kaspa message. A type that
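The two maps above are what give every protocol and RPC command a human-readable name in logs. A minimal sketch, assuming the kaspanet/kaspad module is available on the import path, that looks up a few of the newly added commands:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Newly added p2p commands resolve through ProtocolMessageCommandToString.
	fmt.Println(appmessage.ProtocolMessageCommandToString[appmessage.CmdRequestAnticone])
	fmt.Println(appmessage.ProtocolMessageCommandToString[appmessage.CmdIBDChainBlockLocator])

	// Newly added RPC commands resolve through RPCMessageCommandToString.
	fmt.Println(appmessage.RPCMessageCommandToString[appmessage.CmdGetCoinSupplyRequestMessage])
	fmt.Println(appmessage.RPCMessageCommandToString[appmessage.CmdNewBlockTemplateNotificationMessage])
}
```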
app/appmessage/p2p_msgibdchainblocklocator.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgIBDChainBlockLocator struct {
	baseMessage
	BlockLocatorHashes []*externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
	return CmdIBDChainBlockLocator
}

// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
	return &MsgIBDChainBlockLocator{
		BlockLocatorHashes: locatorHashes,
	}
}
app/appmessage/p2p_msgrequestanticone.go (new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgRequestAnticone implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
type MsgRequestAnticone struct {
	baseMessage
	BlockHash   *externalapi.DomainHash
	ContextHash *externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAnticone) Command() MessageCommand {
	return CmdRequestAnticone
}

// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
	return &MsgRequestAnticone{
		BlockHash:   blockHash,
		ContextHash: contextHash,
	}
}
app/appmessage/p2p_msgrequestibdchainblocklocator.go (new file, 31 lines)

@@ -0,0 +1,31 @@
package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
type MsgRequestIBDChainBlockLocator struct {
	baseMessage
	HighHash *externalapi.DomainHash
	LowHash  *externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
	return CmdRequestIBDChainBlockLocator
}

// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
	return &MsgRequestIBDChainBlockLocator{
		HighHash: highHash,
		LowHash:  lowHash,
	}
}
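The two new locator messages work as a pair: MsgRequestIBDChainBlockLocator asks for a chain block locator between a low and a high hash, and MsgIBDChainBlockLocator carries the answer. A small sketch of constructing both sides; the hash strings are placeholders reused from the test vectors later in this diff, not meaningful chain data:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	// Placeholder hashes, borrowed from the msg_tx test vectors in this diff.
	lowHash, err := externalapi.NewDomainHashFromString(
		"b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7")
	if err != nil {
		panic(err)
	}
	highHash, err := externalapi.NewDomainHashFromString(
		"fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7")
	if err != nil {
		panic(err)
	}

	// Requesting side: ask for a chain block locator between the two hashes.
	request := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
	fmt.Println(appmessage.ProtocolMessageCommandToString[request.Command()])

	// Responding side: answer with the locator hashes it computed.
	response := appmessage.NewMsgIBDChainBlockLocator([]*externalapi.DomainHash{highHash, lowHash})
	fmt.Println(appmessage.ProtocolMessageCommandToString[response.Command()])
}
```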
@@ -0,0 +1,22 @@
package appmessage

// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
// more blocks from the pruning anticone.
//
// This message has no payload.
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
	baseMessage
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
	return CmdRequestNextPruningPointAndItsAnticoneBlocks
}

// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
	return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
}
@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {

 // TestTxHash tests the ability to generate the hash of a transaction accurately.
 func TestTxHashAndID(t *testing.T) {
-	txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
-	txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
+	txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"
+	txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"
 	wantTxID1, err := transactionid.FromString(txID1Str)
 	if err != nil {
 		t.Fatalf("NewTxIDFromStr: %v", err)

@@ -185,7 +185,7 @@ func TestTxHashAndID(t *testing.T) {
 			spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
 	}

-	hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
+	hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7"
 	wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
 	if err != nil {
 		t.Errorf("NewTxIDFromStr: %v", err)
@@ -5,6 +5,7 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
 	baseMessage
 	PayAddress string
+	ExtraData  string
 }

 // Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }

 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
 	return &GetBlockTemplateRequestMessage{
 		PayAddress: payAddress,
+		ExtraData:  extraData,
 	}
 }
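A brief usage sketch for the extended constructor; both string arguments are placeholders, and how the node ultimately uses ExtraData is not shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// PayAddress names where the coinbase reward should go; ExtraData is an
	// arbitrary string carried alongside the request. Both are placeholders.
	request := appmessage.NewGetBlockTemplateRequestMessage(
		"kaspa:qqexampleaddressxxxxxxxxxxxxxxxxxxxxxxxx", // placeholder address
		"example-miner/1.0",                             // placeholder extra data
	)
	fmt.Println(request.PayAddress, request.ExtraData)
}
```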
app/appmessage/rpc_get_coin_supply.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package appmessage

// GetCoinSupplyRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyRequestMessage struct {
	baseMessage
}

// Command returns the protocol command string for the message
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
	return CmdGetCoinSupplyRequestMessage
}

// NewGetCoinSupplyRequestMessage returns a instance of the message
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
	return &GetCoinSupplyRequestMessage{}
}

// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyResponseMessage struct {
	baseMessage
	MaxSompi         uint64
	CirculatingSompi uint64

	Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
	return CmdGetCoinSupplyResponseMessage
}

// NewGetCoinSupplyResponseMessage returns a instance of the message
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
	return &GetCoinSupplyResponseMessage{
		MaxSompi:         maxSompi,
		CirculatingSompi: circulatingSompi,
	}
}
||||
@@ -23,6 +23,8 @@ type GetInfoResponseMessage struct {
|
||||
P2PID string
|
||||
MempoolSize uint64
|
||||
ServerVersion string
|
||||
IsUtxoIndexed bool
|
||||
IsSynced bool
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
@@ -33,10 +35,12 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetInfoResponseMessage returns a instance of the message
|
||||
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string) *GetInfoResponseMessage {
|
||||
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage {
|
||||
return &GetInfoResponseMessage{
|
||||
P2PID: p2pID,
|
||||
MempoolSize: mempoolSize,
|
||||
ServerVersion: serverVersion,
|
||||
IsUtxoIndexed: isUtxoIndexed,
|
||||
IsSynced: isSynced,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntriesRequestMessage struct {
 	baseMessage
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }

 // Command returns the protocol command string for the message
@@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
 }

 // NewGetMempoolEntriesRequestMessage returns a instance of the message
-func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
-	return &GetMempoolEntriesRequestMessage{}
+func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
+	return &GetMempoolEntriesRequestMessage{
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }

 // GetMempoolEntriesResponseMessage is an appmessage corresponding to
app/appmessage/rpc_get_mempool_entries_by_addresses.go (new file, 52 lines)

@@ -0,0 +1,52 @@
package appmessage

// MempoolEntryByAddress represents MempoolEntries associated with some address
type MempoolEntryByAddress struct {
	Address   string
	Receiving []*MempoolEntry
	Sending   []*MempoolEntry
}

// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesRequestMessage struct {
	baseMessage
	Addresses             []string
	IncludeOrphanPool     bool
	FilterTransactionPool bool
}

// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand {
	return CmdGetMempoolEntriesByAddressesRequestMessage
}

// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
	return &GetMempoolEntriesByAddressesRequestMessage{
		Addresses:             addresses,
		IncludeOrphanPool:     includeOrphanPool,
		FilterTransactionPool: filterTransactionPool,
	}
}

// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetMempoolEntriesByAddressesResponseMessage struct {
	baseMessage
	Entries []*MempoolEntryByAddress

	Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand {
	return CmdGetMempoolEntriesByAddressesResponseMessage
}

// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message
func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage {
	return &GetMempoolEntriesByAddressesResponseMessage{
		Entries: entries,
	}
}
@@ -4,7 +4,9 @@ package appmessage
 // its respective RPC message
 type GetMempoolEntryRequestMessage struct {
 	baseMessage
-	TxID string
+	TxID                  string
+	IncludeOrphanPool     bool
+	FilterTransactionPool bool
 }

 // Command returns the protocol command string for the message
@@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
 }

 // NewGetMempoolEntryRequestMessage returns a instance of the message
-func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
-	return &GetMempoolEntryRequestMessage{TxID: txID}
+func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
+	return &GetMempoolEntryRequestMessage{
+		TxID:                  txID,
+		IncludeOrphanPool:     includeOrphanPool,
+		FilterTransactionPool: filterTransactionPool,
+	}
 }

 // GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
 type MempoolEntry struct {
 	Fee         uint64
 	Transaction *RPCTransaction
+	IsOrphan    bool
 }

 // Command returns the protocol command string for the message
@@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
 }

 // NewGetMempoolEntryResponseMessage returns a instance of the message
-func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
+func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
 	return &GetMempoolEntryResponseMessage{
 		Entry: &MempoolEntry{
 			Fee:         fee,
 			Transaction: transaction,
+			IsOrphan:    isOrphan,
 		},
 	}
 }
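A short sketch of the extended request constructor with the two new pool-selection flags; the transaction ID is a placeholder reused from the test vectors above, and the exact pool-selection semantics are as suggested by the field names rather than spelled out in this diff:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// The two booleans choose which mempool sub-pools the node should consult
	// (see the IncludeOrphanPool / FilterTransactionPool fields above).
	request := appmessage.NewGetMempoolEntryRequestMessage(
		"e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d", // placeholder txID
		true,  // includeOrphanPool
		false, // filterTransactionPool
	)
	fmt.Printf("%+v\n", request)
}
```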
@@ -4,7 +4,8 @@ package appmessage
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
 	baseMessage
-	StartHash string
+	StartHash                     string
+	IncludeAcceptedTransactionIDs bool
 }

 // Command returns the protocol command string for the message
@@ -13,18 +14,29 @@ func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() Messa
 }

 // NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message
-func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+func NewGetVirtualSelectedParentChainFromBlockRequestMessage(
+	startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage {
+
 	return &GetVirtualSelectedParentChainFromBlockRequestMessage{
-		StartHash: startHash,
+		StartHash:                     startHash,
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
 	}
 }

+// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and
+// VirtualSelectedParentChainChangedNotificationMessage appmessages
+type AcceptedTransactionIDs struct {
+	AcceptingBlockHash     string
+	AcceptedTransactionIDs []string
+}
+
 // GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
 // its respective RPC message
 type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs

 	Error *RPCError
 }
@@ -36,10 +48,11 @@ func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() Mess

 // NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message
 func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes,
-	addedChainBlockHashes []string) *GetVirtualSelectedParentChainFromBlockResponseMessage {
+	addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage {

 	return &GetVirtualSelectedParentChainFromBlockResponseMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlockHashes,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
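With IncludeAcceptedTransactionIDs set, the response also reports which transactions each added chain block accepted. A client-side sketch of reading that shape; the response here is built by hand purely for illustration, whereas a real client would receive it over RPC, and the hash strings are placeholders:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	request := appmessage.NewGetVirtualSelectedParentChainFromBlockRequestMessage(
		"fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7", // placeholder start hash
		true, // includeAcceptedTransactionIDs
	)
	_ = request // would be sent to the node over RPC

	// Hand-built response, only to show the shape of the new field.
	response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
		nil, // removedChainBlockHashes
		[]string{"b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7"},
		[]*appmessage.AcceptedTransactionIDs{{
			AcceptingBlockHash:     "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7",
			AcceptedTransactionIDs: []string{"e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d"},
		}},
	)
	for _, accepted := range response.AcceptedTransactionIDs {
		fmt.Println(accepted.AcceptingBlockHash, len(accepted.AcceptedTransactionIDs))
	}
}
```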
app/appmessage/rpc_notify_new_block_template.go (new file, 50 lines)

@@ -0,0 +1,50 @@
package appmessage

// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateRequestMessage struct {
	baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
	return CmdNotifyNewBlockTemplateRequestMessage
}

// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
	return &NotifyNewBlockTemplateRequestMessage{}
}

// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateResponseMessage struct {
	baseMessage
	Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
	return CmdNotifyNewBlockTemplateResponseMessage
}

// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
	return &NotifyNewBlockTemplateResponseMessage{}
}

// NewBlockTemplateNotificationMessage is an appmessage corresponding to
// its respective RPC message
type NewBlockTemplateNotificationMessage struct {
	baseMessage
}

// Command returns the protocol command string for the message
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
	return CmdNewBlockTemplateNotificationMessage
}

// NewNewBlockTemplateNotificationMessage returns an instance of the message
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
	return &NewBlockTemplateNotificationMessage{}
}
@@ -4,6 +4,7 @@ package appmessage
 // its respective RPC message
 type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
 	baseMessage
+	IncludeAcceptedTransactionIDs bool
 }

 // Command returns the protocol command string for the message
@@ -11,9 +12,13 @@ func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() Mess
 	return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
 }

-// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns a instance of the message
-func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
-	return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
+// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
+func NewNotifyVirtualSelectedParentChainChangedRequestMessage(
+	includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage {
+
+	return &NotifyVirtualSelectedParentChainChangedRequestMessage{
+		IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs,
+	}
 }

 // NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
@@ -39,6 +44,7 @@ type VirtualSelectedParentChainChangedNotificationMessage struct {
 	baseMessage
 	RemovedChainBlockHashes []string
 	AddedChainBlockHashes   []string
+	AcceptedTransactionIDs  []*AcceptedTransactionIDs
 }

 // Command returns the protocol command string for the message
@@ -48,10 +54,11 @@ func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() Messa

 // NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message
 func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes,
-	addedChainBlocks []string) *VirtualSelectedParentChainChangedNotificationMessage {
+	addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage {

 	return &VirtualSelectedParentChainChangedNotificationMessage{
 		RemovedChainBlockHashes: removedChainBlockHashes,
 		AddedChainBlockHashes:   addedChainBlocks,
+		AcceptedTransactionIDs:  acceptedTransactionIDs,
 	}
 }
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"sync/atomic"

+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
 	"github.com/kaspanet/kaspad/domain/miningmanager/mempool"

 	"github.com/kaspanet/kaspad/app/protocol"
@@ -67,6 +69,7 @@ func (a *ComponentManager) Stop() {
 	}

 	a.protocolManager.Close()
+	close(a.protocolManager.Context().Domain().ConsensusEventsChannel())

 	return
 }
@@ -118,7 +121,7 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
 	if err != nil {
 		return nil, err
 	}
-	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
+	rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt)

 	return &ComponentManager{
 		cfg: cfg,
@@ -139,6 +142,7 @@ func setupRPC(
 	connectionManager *connmanager.ConnectionManager,
 	addressManager *addressmanager.AddressManager,
 	utxoIndex *utxoindex.UTXOIndex,
+	consensusEventsChan chan externalapi.ConsensusEvent,
 	shutDownChan chan<- struct{},
 ) *rpc.Manager {

@@ -150,10 +154,10 @@ func setupRPC(
 		connectionManager,
 		addressManager,
 		utxoIndex,
+		consensusEventsChan,
 		shutDownChan,
 	)
-	protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
-	protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
+	protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
 	protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)

 	return rpcManager
@@ -16,53 +16,42 @@ import (
 // OnNewBlock updates the mempool after a new block arrival, and
 // relays newly unorphaned transactions and possibly rebroadcast
 // manually added transactions when not in IBD.
-func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
-	virtualChangeSet *externalapi.VirtualChangeSet) error {
+func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {

 	hash := consensushashing.BlockHash(block)
-	log.Debugf("OnNewBlock start for block %s", hash)
-	defer log.Debugf("OnNewBlock end for block %s", hash)
+	log.Tracef("OnNewBlock start for block %s", hash)
+	defer log.Tracef("OnNewBlock end for block %s", hash)

-	unorphaningResults, err := f.UnorphanBlocks(block)
+	unorphanedBlocks, err := f.UnorphanBlocks(block)
 	if err != nil {
 		return err
 	}

-	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
+	log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))

 	newBlocks := []*externalapi.DomainBlock{block}
-	newVirtualChangeSets := []*externalapi.VirtualChangeSet{virtualChangeSet}
-	for _, unorphaningResult := range unorphaningResults {
-		newBlocks = append(newBlocks, unorphaningResult.block)
-		newVirtualChangeSets = append(newVirtualChangeSets, unorphaningResult.virtualChangeSet)
-	}
+	newBlocks = append(newBlocks, unorphanedBlocks...)

 	allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0)
-	for i, newBlock := range newBlocks {
+	for _, newBlock := range newBlocks {
 		log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
 		acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
 		if err != nil {
 			return err
 		}
 		allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...)
-
-		if f.onBlockAddedToDAGHandler != nil {
-			log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
-			virtualChangeSet = newVirtualChangeSets[i]
-			err := f.onBlockAddedToDAGHandler(newBlock, virtualChangeSet)
-			if err != nil {
-				return err
-			}
-		}
 	}

 	return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions)
 }

-// OnVirtualChange calls the handler function whenever the virtual block changes.
-func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
-	if f.onVirtualChangeHandler != nil && virtualChangeSet != nil {
-		return f.onVirtualChangeHandler(virtualChangeSet)
+// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
+func (f *FlowContext) OnNewBlockTemplate() error {
+	// Clear current template cache. Note we call this even if the handler is nil, in order to keep the
+	// state consistent without dependency on external event registration
+	f.Domain().MiningManager().ClearBlockTemplate()
+	if f.onNewBlockTemplateHandler != nil {
+		return f.onNewBlockTemplateHandler()
 	}

 	return nil
@@ -118,14 +107,18 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
 		return protocolerrors.Errorf(false, "cannot add header only block")
 	}

-	virtualChangeSet, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
+	err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
 		}
 		return err
 	}
-	err = f.OnNewBlock(block, virtualChangeSet)
+	err = f.OnNewBlockTemplate()
+	if err != nil {
+		return err
+	}
+	err = f.OnNewBlock(block)
 	if err != nil {
 		return err
 	}
@@ -150,7 +143,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
 		return false
 	}
 	f.ibdPeer = ibdPeer
-	log.Infof("IBD started")
+	log.Infof("IBD started with peer %s", ibdPeer)

 	return true
 }
@@ -2,6 +2,7 @@ package flowcontext

 import (
 	"errors"
+	"strings"
 	"sync/atomic"

 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -9,6 +10,11 @@ import (
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 )

+var (
+	// ErrPingTimeout signifies that a ping operation timed out.
+	ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
+)
+
 // HandleError handles an error from a flow,
 // It sends the error to errChan if isStopping == 0 and increments isStopping
 //
@@ -21,8 +27,15 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
 	if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
 		panic(err)
 	}
-
-	log.Errorf("error from %s: %s", flowName, err)
+	if errors.Is(err, ErrPingTimeout) {
+		// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
+		log.Errorf("error from %s: %s", flowName, err)
+	} else {
+		// Explain to the user that this is not a panic, but only a protocol error with a specific peer
+		logFrame := strings.Repeat("=", 52)
+		log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
+			flowName, logFrame, err, logFrame)
+	}
 	}

 	if atomic.AddUint32(isStopping, 1) == 1 {
@@ -18,12 +18,8 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
 )

-// OnBlockAddedToDAGHandler is a handler function that's triggered
-// when a block is added to the DAG
-type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
-
-// OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
-type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
+// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
+type OnNewBlockTemplateHandler func() error

 // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
 // resets due to pruning point change via IBD.
@@ -44,8 +40,7 @@ type FlowContext struct {

 	timeStarted int64

-	onVirtualChangeHandler               OnVirtualChangeHandler
-	onBlockAddedToDAGHandler             OnBlockAddedToDAGHandler
+	onNewBlockTemplateHandler            OnNewBlockTemplateHandler
 	onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
 	onTransactionAddedToMempoolHandler   OnTransactionAddedToMempoolHandler

@@ -102,14 +97,14 @@ func (f *FlowContext) ShutdownChan() <-chan struct{} {
 	return f.shutdownChan
 }

-// SetOnVirtualChangeHandler sets the onVirtualChangeHandler handler
-func (f *FlowContext) SetOnVirtualChangeHandler(onVirtualChangeHandler OnVirtualChangeHandler) {
-	f.onVirtualChangeHandler = onVirtualChangeHandler
+// IsNearlySynced returns whether current consensus is considered synced or close to being synced.
+func (f *FlowContext) IsNearlySynced() (bool, error) {
+	return f.Domain().Consensus().IsNearlySynced()
 }

-// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
-func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
-	f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
+// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
+func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
+	f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
 }

 // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
@@ -72,3 +72,10 @@ func (f *FlowContext) Peers() []*peerpkg.Peer {
 	}
 	return peers
 }
+
+// HasPeers returns whether there are currently active peers
+func (f *FlowContext) HasPeers() bool {
+	f.peersMutex.RLock()
+	defer f.peersMutex.RUnlock()
+	return len(f.peers) > 0
+}
@@ -15,12 +15,6 @@ import (
 // on: 2^orphanResolutionRange * PHANTOM K.
 const maxOrphans = 600

-// UnorphaningResult is the result of unorphaning a block
-type UnorphaningResult struct {
-	block            *externalapi.DomainBlock
-	virtualChangeSet *externalapi.VirtualChangeSet
-}
-
 // AddOrphan adds the block to the orphan set
 func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
 	f.orphansMutex.Lock()
@@ -57,7 +51,7 @@ func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
 }

 // UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore.
-func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
+func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) {
 	f.orphansMutex.Lock()
 	defer f.orphansMutex.Unlock()

@@ -66,7 +60,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
 	rootBlockHash := consensushashing.BlockHash(rootBlock)
 	processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})

-	var unorphaningResults []*UnorphaningResult
+	var unorphanedBlocks []*externalapi.DomainBlock
 	for len(processQueue) > 0 {
 		var orphanHash externalapi.DomainHash
 		orphanHash, processQueue = processQueue[0], processQueue[1:]
@@ -90,21 +84,18 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*Uno
 			}
 		}
 		if canBeUnorphaned {
-			virtualChangeSet, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
+			unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
 			if err != nil {
 				return nil, err
 			}
 			if unorphaningSucceeded {
-				unorphaningResults = append(unorphaningResults, &UnorphaningResult{
-					block:            orphanBlock,
-					virtualChangeSet: virtualChangeSet,
-				})
+				unorphanedBlocks = append(unorphanedBlocks, orphanBlock)
 				processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
 			}
 		}
 	}

-	return unorphaningResults, nil
+	return unorphanedBlocks, nil
 }

 // addChildOrphansToProcessQueue finds all child orphans of `blockHash`
@@ -143,24 +134,24 @@ func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash)
 	return childOrphans
 }

-func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.VirtualChangeSet, bool, error) {
+func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) {
 	orphanBlock, ok := f.orphans[orphanHash]
 	if !ok {
-		return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
+		return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
 	}
 	delete(f.orphans, orphanHash)

-	virtualChangeSet, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
+	err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
 	if err != nil {
 		if errors.As(err, &ruleerrors.RuleError{}) {
 			log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
-			return nil, false, nil
+			return false, nil
 		}
-		return nil, false, err
+		return false, err
 	}

 	log.Infof("Unorphaned block %s", orphanHash)
-	return virtualChangeSet, true, nil
+	return true, nil
 }

 // GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
@@ -1,47 +0,0 @@
package flowcontext

import "github.com/kaspanet/kaspad/util/mstime"

const (
	maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)

// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
	peers := f.Peers()
	if len(peers) == 0 {
		log.Debugf("The node is not connected, so ShouldMine returns false")
		return false, nil
	}

	if f.IsIBDRunning() {
		log.Debugf("IBD is running, so ShouldMine returns false")
		return false, nil
	}

	virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
	if err != nil {
		return false, err
	}

	if virtualSelectedParent.Equal(f.Config().NetParams().GenesisHash) {
		return false, nil
	}

	virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
	if err != nil {
		return false, err
	}

	now := mstime.Now().UnixMilliseconds()
	if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
		log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
			virtualSelectedParentHeader.TimeInMilliseconds())
		return true, nil
	}

	log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
		virtualSelectedParentHeader.TimeInMilliseconds())
	return false, nil
}
@@ -18,9 +18,9 @@ var (

 	// minAcceptableProtocolVersion is the lowest protocol version that a
 	// connected peer may support.
-	minAcceptableProtocolVersion = uint32(4)
+	minAcceptableProtocolVersion = uint32(5)

-	maxAcceptableProtocolVersion = uint32(4)
+	maxAcceptableProtocolVersion = uint32(5)
 )

 type receiveVersionFlow struct {
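These two constants bound the protocol versions the node will speak, so the bump from 4 to 5 effectively drops pre-v5 peers at handshake time. A standalone sketch of that kind of range check, using local mirror constants rather than the unexported ones in the handshake package; the real check may differ in detail:

```go
package main

import "fmt"

// Local mirrors of the handshake bounds after this change (illustrative only).
const (
	minAcceptableProtocolVersion = uint32(5)
	maxAcceptableProtocolVersion = uint32(5)
)

// acceptablePeerVersion reports whether a peer's advertised protocol
// version falls inside the supported range.
func acceptablePeerVersion(peerVersion uint32) bool {
	return peerVersion >= minAcceptableProtocolVersion &&
		peerVersion <= maxAcceptableProtocolVersion
}

func main() {
	fmt.Println(acceptablePeerVersion(4)) // false: pre-v5 peers are rejected
	fmt.Println(acceptablePeerVersion(5)) // true
}
```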
app/protocol/flows/v5/blockrelay/batch_size_test.go (new file, 16 lines)

@@ -0,0 +1,16 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"testing"
)

func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
	if ibdBatchSize >= router.DefaultMaxMessages {
		t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
			ibdBatchSize, router.DefaultMaxMessages)
	}
}
@@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex

 		switch message := message.(type) {
 		case *appmessage.MsgInvRelayBlock:
-			flow.invsQueue = append(flow.invsQueue, message)
+			flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
 		case *appmessage.MsgBlockLocator:
 			return message.BlockLocatorHashes, nil
 		default:
@@ -5,7 +5,6 @@ import (
 	"github.com/kaspanet/kaspad/app/protocol/peer"
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )

@@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
 	if err != nil {
 		return err
 	}
-	if !blockInfo.Exists {
+	if !blockInfo.HasHeader() {
 		return protocolerrors.Errorf(true, "received IBDBlockLocator "+
 			"with an unknown targetHash %s", targetHash)
 	}
@@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
 	}

 	// The IBD block locator is checking only existing blocks with bodies.
-	if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
+	if !blockInfo.HasBody() {
 		continue
 	}
@@ -4,7 +4,6 @@ import (
 	"github.com/kaspanet/kaspad/app/appmessage"
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 	"github.com/pkg/errors"
 )
@@ -28,18 +27,15 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
 	log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
 	for i, hash := range msgRequestIBDBlocks.Hashes {
 		// Fetch the block from the database.
-		blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
-		if err != nil {
-			return err
-		}
-		if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-			return protocolerrors.Errorf(true, "block %s not found", hash)
-		}
-		block, err := context.Domain().Consensus().GetBlock(hash)
+		block, found, err := context.Domain().Consensus().GetBlock(hash)
 		if err != nil {
 			return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
 		}

+		if !found {
+			return protocolerrors.Errorf(false, "IBD block %s not found", hash)
+		}
+
 		// TODO (Partial nodes): Convert block to partial block if needed

 		blockMessage := appmessage.DomainBlockToMsgBlock(block)
@@ -0,0 +1,85 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
	Domain() domain.Domain
}

type handleRequestIBDChainBlockLocatorFlow struct {
	RequestIBDChainBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestIBDChainBlockLocatorFlow{
		RequestIBDChainBlockLocatorContext: context,
		incomingRoute:                      incomingRoute,
		outgoingRoute:                      outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
	for {
		highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)

		var locator externalapi.BlockLocator
		if highHash == nil || lowHash == nil {
			locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
		} else {
			locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
			if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
				// The chain has been modified, signal it by sending an empty locator
				locator, err = externalapi.BlockLocator{}, nil
			}
		}

		if err != nil {
			log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between %s and %s", lowHash, highHash)
		}

		err = flow.sendIBDChainBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)

	return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}

func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
	msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
	err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
	if err != nil {
		return err
	}
	return nil
}
@@ -118,16 +118,33 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
 		return err
 	}

-	for _, blockHash := range pointAndItsAnticone {
-		block, err := context.Domain().Consensus().GetBlock(blockHash)
+	for i, blockHash := range pointAndItsAnticone {
+		block, found, err := context.Domain().Consensus().GetBlock(blockHash)
 		if err != nil {
 			return err
 		}

+		if !found {
+			return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
+		}
+
 		err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
 		if err != nil {
 			return err
 		}
+
+		if (i+1)%ibdBatchSize == 0 {
+			// No timeout here, as we don't care if the syncee takes its time computing,
+			// since it only blocks this dedicated flow
+			message, err := incomingRoute.Dequeue()
+			if err != nil {
+				return err
+			}
+			if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
+				return protocolerrors.Errorf(true, "received unexpected message type. "+
+					"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
+			}
+		}
 	}

 	err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
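On the other side of this exchange, the syncee is expected to send MsgRequestNextPruningPointAndItsAnticoneBlocks after every ibdBatchSize trusted-data blocks, which is what lets the loop above continue. A self-contained sketch of that counting pattern, with the routing replaced by a stub and an assumed batch-size value; the real syncee flow lives elsewhere in the codebase and differs in detail:

```go
package main

import "fmt"

// Assumed to mirror the shared ibdBatchSize constant; illustrative only.
const ibdBatchSize = 99

// requestNextBatch stands in for enqueueing a
// MsgRequestNextPruningPointAndItsAnticoneBlocks on the outgoing route.
func requestNextBatch() {
	fmt.Println("syncee -> syncer: RequestNextPruningPointAndItsAnticoneBlocks")
}

func main() {
	const totalBlocks = 250 // hypothetical number of pruning-anticone blocks

	for i := 0; i < totalBlocks; i++ {
		// ... receive and process trusted-data block i here ...

		// Mirror of the syncer's (i+1)%ibdBatchSize check: after every full
		// batch, ask for the next one so the syncer's flow unblocks.
		if (i+1)%ibdBatchSize == 0 {
			requestNextBatch()
		}
	}
}
```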
@@ -5,7 +5,6 @@ import (
 	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 	"github.com/kaspanet/kaspad/domain"
-	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 	"github.com/pkg/errors"
 )
@@ -29,18 +28,15 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
 	log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
 	for _, hash := range getRelayBlocksMessage.Hashes {
 		// Fetch the block from the database.
-		blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
-		if err != nil {
-			return err
-		}
-		if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-			return protocolerrors.Errorf(true, "block %s not found", hash)
-		}
-		block, err := context.Domain().Consensus().GetBlock(hash)
+		block, found, err := context.Domain().Consensus().GetBlock(hash)
 		if err != nil {
 			return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
 		}

+		if !found {
+			return protocolerrors.Errorf(false, "Relay block %s not found", hash)
+		}
+
 		// TODO (Partial nodes): Convert block to partial block if needed

 		err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
@@ -7,9 +7,11 @@ import (
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
@@ -24,8 +26,8 @@ var orphanResolutionRange uint32 = 5
|
||||
type RelayInvsContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlock(block *externalapi.DomainBlock) error
|
||||
OnNewBlockTemplate() error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
|
||||
Broadcast(message appmessage.Message) error
|
||||
@@ -34,13 +36,19 @@ type RelayInvsContext interface {
|
||||
IsOrphan(blockHash *externalapi.DomainHash) bool
|
||||
IsIBDRunning() bool
|
||||
IsRecoverableError(err error) bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type invRelayBlock struct {
|
||||
Hash *externalapi.DomainHash
|
||||
IsOrphanRoot bool
|
||||
}
|
||||
|
||||
type handleRelayInvsFlow struct {
|
||||
RelayInvsContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
invsQueue []*appmessage.MsgInvRelayBlock
|
||||
invsQueue []invRelayBlock
|
||||
}
|
||||
|
||||
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
|
||||
@@ -53,7 +61,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
|
||||
invsQueue: make([]invRelayBlock, 0),
|
||||
}
|
||||
err := flow.start()
|
||||
// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
|
||||
@@ -104,10 +112,16 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Block relay is disabled during IBD
|
||||
// Block relay is disabled if the node is already during IBD AND considered out of sync
|
||||
if flow.IsIBDRunning() {
|
||||
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
|
||||
continue
|
||||
isNearlySynced, err := flow.IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isNearlySynced {
|
||||
log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Requesting block %s", inv.Hash)
|
||||
@@ -130,8 +144,36 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Note we do not apply the heuristic below if inv was queued as an orphan root, since
|
||||
// that means the process was started by a proper and relevant relay block
|
||||
if !inv.IsOrphanRoot {
|
||||
// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
|
||||
virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
|
||||
mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Since `BlueWork` respects topology, this condition means that the relay
|
||||
// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
|
||||
// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
|
||||
if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
|
||||
log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
|
||||
inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
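The heuristic above drops relay blocks whose blue work does not exceed that of virtual's merge depth root, because such blocks cannot currently be merged under virtual. A hedged sketch of just that comparison, modelling blue work as math/big integers as the consensus code does.

package main

import (
	"fmt"
	"math/big"
)

// shouldRequestRelayBlock mirrors the gate above: a relayed block is only worth
// requesting if its blue work is strictly greater than the blue work of
// virtual's merge depth root; otherwise it cannot be merged under virtual
// unless later blocks bring it back into scope.
func shouldRequestRelayBlock(relayBlueWork, mergeDepthRootBlueWork *big.Int) bool {
	return relayBlueWork.Cmp(mergeDepthRootBlueWork) > 0
}

func main() {
	root := big.NewInt(1_000_000)
	fmt.Println(shouldRequestRelayBlock(big.NewInt(999_999), root))   // false: skip it
	fmt.Println(shouldRequestRelayBlock(big.NewInt(1_000_001), root)) // true: request it
}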
|
||||
|
||||
log.Debugf("Processing block %s", inv.Hash)
|
||||
missingParents, virtualChangeSet, err := flow.processBlock(block)
|
||||
oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
missingParents, err := flow.processBlock(block)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrPrunedBlock) {
|
||||
log.Infof("Ignoring pruned block %s", inv.Hash)
|
||||
@@ -153,13 +195,48 @@ func (flow *handleRelayInvsFlow) start() error {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Relaying block %s", inv.Hash)
|
||||
err = flow.relayBlock(block)
|
||||
oldVirtualParents := hashset.New()
|
||||
for _, parent := range oldVirtualInfo.ParentHashes {
|
||||
oldVirtualParents.Add(parent)
|
||||
}
|
||||
|
||||
newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtualHasNewParents := false
|
||||
for _, parent := range newVirtualInfo.ParentHashes {
|
||||
if oldVirtualParents.Contains(parent) {
|
||||
continue
|
||||
}
|
||||
virtualHasNewParents = true
|
||||
block, found, err := flow.Domain().Consensus().GetBlock(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return protocolerrors.Errorf(false, "Virtual parent %s not found", parent)
|
||||
}
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("Relaying block %s", blockHash)
|
||||
err = flow.relayBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if virtualHasNewParents {
|
||||
log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
|
||||
err = flow.OnNewBlockTemplate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
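With processBlock no longer returning a virtual change set, the flow detects what changed by diffing virtual's parent hashes before and after insertion and relaying only the newly added parents. A small sketch of that set difference; plain strings stand in for externalapi.DomainHash.

package main

import "fmt"

// newParents returns the elements of after that were not present in before,
// mirroring the oldVirtualParents / newVirtualInfo.ParentHashes comparison above.
func newParents(before, after []string) []string {
	old := make(map[string]struct{}, len(before))
	for _, parent := range before {
		old[parent] = struct{}{}
	}
	var added []string
	for _, parent := range after {
		if _, ok := old[parent]; !ok {
			added = append(added, parent)
		}
	}
	return added
}

func main() {
	before := []string{"a", "b"}
	after := []string{"b", "c", "d"}
	fmt.Println(newParents(before, after)) // [c d] -> only these are relayed
}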
|
||||
|
||||
log.Infof("Accepted block %s via relay", inv.Hash)
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -175,24 +252,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
|
||||
func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
|
||||
if len(flow.invsQueue) > 0 {
|
||||
var inv *appmessage.MsgInvRelayBlock
|
||||
var inv invRelayBlock
|
||||
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
msg, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return invRelayBlock{}, err
|
||||
}
|
||||
|
||||
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
|
||||
if !ok {
|
||||
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
|
||||
"expecting an inv message", msg.Command())
|
||||
}
|
||||
return inv, nil
|
||||
return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
|
||||
}
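readInv serves invs from the local invsQueue (for example orphan roots prepended by AddOrphanRootsToQueue) before falling back to the network route. A toy sketch of that queue-first read order; the slice and channel here are stand-ins for the real queue and router.Route.

package main

import "fmt"

type invReader struct {
	queue []string    // locally re-queued invs, served first
	route chan string // stands in for the incoming router.Route
}

// next returns the oldest queued inv if there is one, and only otherwise
// blocks on the network route - the same order readInv uses above.
func (r *invReader) next() string {
	if len(r.queue) > 0 {
		var inv string
		inv, r.queue = r.queue[0], r.queue[1:]
		return inv
	}
	return <-r.route
}

func main() {
	r := &invReader{queue: []string{"orphan-root-1"}, route: make(chan string, 1)}
	r.route <- "network-inv"
	fmt.Println(r.next()) // orphan-root-1 (queue first)
	fmt.Println(r.next()) // network-inv
}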
|
||||
|
||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||
@@ -237,7 +314,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgInvRelayBlock:
|
||||
flow.invsQueue = append(flow.invsQueue, message)
|
||||
flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
|
||||
case *appmessage.MsgBlock:
|
||||
return message, nil
|
||||
default:
|
||||
@@ -246,22 +323,25 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
}
|
||||
|
||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||
if errors.As(err, missingParentsError) {
|
||||
return missingParentsError.MissingParentHashes, nil, nil
|
||||
return missingParentsError.MissingParentHashes, nil
|
||||
}
|
||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
// A duplicate block should not appear to the user as a warning and is already reported in the calling function
|
||||
if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
}
|
||||
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
}
|
||||
return nil, virtualChangeSet, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||
@@ -369,12 +449,16 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
|
||||
"probably happened because it was randomly evicted immediately after it was added.", orphan)
|
||||
}
|
||||
|
||||
if len(orphanRoots) == 0 {
|
||||
// In some rare cases we get here when there are no orphan roots already
|
||||
return nil
|
||||
}
|
||||
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
|
||||
|
||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
|
||||
invMessages := make([]invRelayBlock, len(orphanRoots))
|
||||
for i, root := range orphanRoots {
|
||||
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
|
||||
invMessages[i] = appmessage.NewMsgInvBlock(root)
|
||||
invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
|
||||
}
|
||||
|
||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
||||
95
app/protocol/flows/v5/blockrelay/handle_request_anticone.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||
type RequestAnticoneContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
}
|
||||
|
||||
type handleRequestAnticoneFlow struct {
|
||||
RequestAnticoneContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peer.Peer
|
||||
}
|
||||
|
||||
// HandleRequestAnticone handles RequestAnticone messages
|
||||
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
|
||||
outgoingRoute *router.Route, peer *peer.Peer) error {
|
||||
|
||||
flow := &handleRequestAnticoneFlow{
|
||||
RequestAnticoneContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestAnticoneFlow) start() error {
|
||||
for {
|
||||
blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
|
||||
log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)
|
||||
|
||||
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
|
||||
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
|
||||
// we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
|
||||
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
|
||||
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
|
||||
if err != nil {
|
||||
return protocolerrors.Wrap(true, err, "Failed querying anticone")
|
||||
}
|
||||
log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)
|
||||
|
||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||
for i, blockHash := range blockHashes {
|
||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
|
||||
}
|
||||
|
||||
// We sort the headers in bottom-up topological order before sending
|
||||
sort.Slice(blockHeaders, func(i, j int) bool {
|
||||
return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
|
||||
})
|
||||
|
||||
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
|
||||
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
|
||||
contextHash *externalapi.DomainHash, err error) {
|
||||
|
||||
message, err := incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)
|
||||
|
||||
return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
|
||||
}
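The anticone headers above are sorted by ascending blue work before being sent. Since blue work respects topology (a block carries more blue work than anything in its past), this ordering sends parents before children, so the syncee can validate each header as it arrives. A sketch of the same sort over a stand-in header type.

package main

import (
	"fmt"
	"math/big"
	"sort"
)

type header struct {
	name     string
	blueWork *big.Int
}

func main() {
	// Child blocks always carry more blue work than their ancestors,
	// so sorting by blue work is a bottom-up topological order.
	headers := []header{
		{"child", big.NewInt(300)},
		{"side-block", big.NewInt(100)},
		{"parent", big.NewInt(200)},
	}
	sort.Slice(headers, func(i, j int) bool {
		return headers[i].blueWork.Cmp(headers[j].blueWork) < 0
	})
	for _, h := range headers {
		fmt.Println(h.name, h.blueWork) // side-block, parent, child
	}
}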
|
||||
@@ -10,7 +10,9 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
const ibdBatchSize = router.DefaultMaxMessages
|
||||
// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
|
||||
// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
|
||||
const ibdBatchSize = 99
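The comment above ties ibdBatchSize to the route capacity (see the referenced test): if a whole batch did not fit in a route buffer, a sender that only waits for an acknowledgement after a full batch could stall. A sketch of that guard as a plain assertion; routeCapacity = 100 is an assumed stand-in for router.DefaultMaxMessages, not its actual value.

package main

import "fmt"

const (
	ibdBatchSize  = 99
	routeCapacity = 100 // assumption: stands in for router.DefaultMaxMessages
)

func main() {
	// Mirrors the intent of TestIBDBatchSizeLessThanRouteCapacity: a whole batch
	// must fit into a route buffer so the peer can keep draining it.
	if ibdBatchSize >= routeCapacity {
		panic(fmt.Sprintf("ibdBatchSize (%d) must be smaller than the route capacity (%d)",
			ibdBatchSize, routeCapacity))
	}
	fmt.Println("batch size fits inside the route capacity")
}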
|
||||
|
||||
// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
|
||||
type RequestHeadersContext interface {
|
||||
@@ -42,7 +44,34 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
|
||||
|
||||
consensus := flow.Domain().Consensus()
|
||||
|
||||
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !lowHashInfo.HasHeader() {
|
||||
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
|
||||
}
|
||||
|
||||
highHashInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashInfo.HasHeader() {
|
||||
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
|
||||
}
|
||||
|
||||
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isLowSelectedAncestorOfHigh {
|
||||
return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
|
||||
lowHash, highHash)
|
||||
}
|
||||
|
||||
for !lowHash.Equal(highHash) {
|
||||
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
|
||||
@@ -51,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
// in order to avoid locking the consensus for too long
|
||||
// maxBlocks MUST be >= MergeSetSizeLimit + 1
|
||||
const maxBlocks = 1 << 10
|
||||
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -59,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
|
||||
|
||||
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
|
||||
for i, blockHash := range blockHashes {
|
||||
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
|
||||
blockHeader, err := consensus.GetBlockHeader(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1,12 +1,12 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
@@ -21,8 +21,8 @@ import (
|
||||
type IBDContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlock(block *externalapi.DomainBlock) error
|
||||
OnNewBlockTemplate() error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
@@ -71,22 +71,25 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
}
|
||||
|
||||
isFinishedSuccessfully := false
|
||||
var err error
|
||||
defer func() {
|
||||
flow.UnsetIBDRunning()
|
||||
flow.logIBDFinished(isFinishedSuccessfully)
|
||||
flow.logIBDFinished(isFinishedSuccessfully, err)
|
||||
}()
|
||||
|
||||
highHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
log.Debugf("Syncing blocks up to %s", highHash)
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
|
||||
relayBlockHash := consensushashing.BlockHash(block)
|
||||
|
||||
log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||
log.Infof("Syncing blocks up to %s", relayBlockHash)
|
||||
log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
|
||||
block, highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -97,7 +100,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
|
||||
if shouldDownloadHeadersProof {
|
||||
log.Infof("Starting IBD with headers proof")
|
||||
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
|
||||
err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -110,27 +113,184 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
|
||||
if isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", highHash)
|
||||
"to the recent pruning point before normal operation can resume.", relayBlockHash)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
|
||||
err = flow.syncPruningPointFutureHeaders(
|
||||
flow.Domain().Consensus(),
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncMissingBlockBodies(highHash)
|
||||
// We start by syncing missing bodies over the syncer selected chain
|
||||
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Relay block might be in the anticone of syncer selected tip, thus
|
||||
// check its chain for missing bodies as well.
|
||||
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
|
||||
// is in syncer virtual mergeset which has bounded size.
|
||||
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
err = flow.syncMissingBlockBodies(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Finished syncing blocks up to %s", highHash)
|
||||
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
|
||||
isFinishedSuccessfully = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
|
||||
/*
|
||||
Algorithm:
|
||||
Request full selected chain block locator from syncer
|
||||
Find the highest block which we know
|
||||
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||
*/
|
||||
|
||||
// Empty hashes indicate that the full chain is queried
|
||||
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
|
||||
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||
chainNegotiationRestartCounter := 0
|
||||
chainNegotiationZoomCounts := 0
|
||||
initialLocatorLen := len(locatorHashes)
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||
for _, syncerChainHash := range locatorHashes {
|
||||
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if info.Exists {
|
||||
if info.BlockStatus == externalapi.StatusInvalid {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash)
|
||||
}
|
||||
|
||||
isPruningPointOnSyncerChain, err := flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash)
|
||||
if err != nil {
|
||||
log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err)
|
||||
}
|
||||
|
||||
// We're only interested in syncer chain blocks that have our pruning
|
||||
// point in their selected chain. Otherwise, it means one of the following:
|
||||
// 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality
|
||||
// (hence we can ignore it unless merged by others).
|
||||
// 2) syncerChainHash is actually in the past of our pruning point so there's no
|
||||
// point in syncing from it.
|
||||
if err == nil && isPruningPointOnSyncerChain {
|
||||
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||
break
|
||||
}
|
||||
}
|
||||
lowestUnknownSyncerChainHash = syncerChainHash
|
||||
}
|
||||
// No unknown blocks, break. Note this can only happen in the first iteration
|
||||
if lowestUnknownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// No shared block, break
|
||||
if currentHighestKnownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = nil
|
||||
break
|
||||
}
|
||||
// No point in zooming further
|
||||
if len(locatorHashes) == 1 {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// Zoom in
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||
lowestUnknownSyncerChainHash,
|
||||
currentHighestKnownSyncerChainHash, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) > 0 {
|
||||
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||
"hashes to match the locator bounds")
|
||||
}
|
||||
|
||||
chainNegotiationZoomCounts++
|
||||
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
if len(locatorHashes) == 2 {
|
||||
// We found our search target
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
|
||||
if chainNegotiationZoomCounts > initialLocatorLen*2 {
|
||||
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
|
||||
// expected to decrease in size at least every two iterations
|
||||
return nil, nil, protocolerrors.Errorf(true,
|
||||
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
|
||||
chainNegotiationZoomCounts, initialLocatorLen)
|
||||
}
|
||||
|
||||
} else { // Empty locator signals a restart due to chain changes
|
||||
chainNegotiationZoomCounts = 0
|
||||
chainNegotiationRestartCounter++
|
||||
if chainNegotiationRestartCounter > 32 {
|
||||
return nil, nil, protocolerrors.Errorf(false,
|
||||
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||
}
|
||||
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
|
||||
|
||||
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
initialLocatorLen = len(locatorHashes)
|
||||
// Reset syncer's header selected tip
|
||||
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Found highest known syncer chain block %s from peer %s",
|
||||
highestKnownSyncerChainHash, flow.peer)
|
||||
|
||||
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
|
||||
}
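negotiateMissingSyncerChainSegment repeatedly requests a chain block locator, finds the highest entry it already knows, and zooms the next locator into the gap between the lowest unknown and highest known entries until they are adjacent. The following self-contained simulation over integer heights (a syncer chain [0..syncerTip], a syncee that knows everything up to synceeKnown) shows how the search converges; the locator spacing is a simplified doubling scheme, not kaspad's exact one.

package main

import "fmt"

// locator returns descending sample points from high down to low with
// exponentially growing steps - a stand-in for a chain block locator.
func locator(high, low int) []int {
	points := []int{}
	step := 1
	for current := high; current > low; {
		points = append(points, current)
		current -= step
		step *= 2
	}
	return append(points, low)
}

// negotiate simulates the zoom-in search and returns the highest syncer chain
// block the syncee knows, or -1 if there is no shared block.
func negotiate(syncerTip, synceeKnown int) int {
	points := locator(syncerTip, 0)
	for {
		lowestUnknown, highestKnown := -1, -1
		for _, p := range points {
			if p <= synceeKnown { // "the syncee already has this block"
				highestKnown = p
				break
			}
			lowestUnknown = p
		}
		// Either everything is known, nothing is known, or the bounds are adjacent.
		if lowestUnknown == -1 || highestKnown == -1 || lowestUnknown == highestKnown+1 {
			return highestKnown
		}
		points = locator(lowestUnknown, highestKnown) // zoom into the gap
	}
}

func main() {
	fmt.Println(negotiate(1_000_000, 123_456)) // 123456, reached after a few zoom-ins
}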
|
||||
|
||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
@@ -140,146 +300,66 @@ func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) {
|
||||
successString := "successfully"
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
if err != nil {
|
||||
successString = fmt.Sprintf("(interrupted: %s)", err)
|
||||
} else {
|
||||
successString = "(interrupted)"
|
||||
}
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||
}
|
||||
|
||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) findHighestSharedBlockHash(
|
||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
||||
"restarting with full block locator")
|
||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return blockLocator, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
||||
// blockLocator. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
||||
highestHash := message.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, true, nil
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
||||
return nil, false, nil
|
||||
case *appmessage.MsgIBDChainBlockLocator:
|
||||
if len(message.BlockLocatorHashes) > 64 {
|
||||
return nil, protocolerrors.Errorf(true,
|
||||
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||
len(message.BlockLocatorHashes))
|
||||
}
|
||||
return message.BlockLocatorHashes, nil
|
||||
default:
|
||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
|
||||
}
|
||||
}
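The 64-entry bound above works because a locator with exponentially growing steps needs only a logarithmic number of entries: even a chain approaching 2^64 blocks fits in roughly 64 points. A small sketch of that size estimate, under the simplified assumption of steps 1, 2, 4, 8, ...

package main

import "fmt"

// locatorLength returns how many entries an exponentially spaced block locator
// needs to cover a chain of the given length.
func locatorLength(chainLength uint64) int {
	length := 1 // the low hash itself is always included
	step := uint64(1)
	for covered := uint64(0); covered < chainLength; {
		length++
		covered += step
		step *= 2
	}
	return length
}

func main() {
	fmt.Println(locatorLength(1 << 20)) // 22
	fmt.Println(locatorLength(1 << 62)) // 64 - why a locator longer than 64 is rejected
}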
|
||||
|
||||
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
|
||||
highBlockDAAScoreHint uint64) error {
|
||||
|
||||
log.Infof("Downloading headers from %s", flow.peer)
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
|
||||
// No need to get syncer selected tip headers, so sync relay past and return
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
|
||||
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")
|
||||
|
||||
// Keep a short queue of BlockHeadersMessages so that there's
|
||||
// never a moment when the node is not validating and inserting
|
||||
@@ -297,6 +377,11 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
close(blockHeadersMessageChan)
|
||||
return
|
||||
}
|
||||
if len(blockHeadersMessage.BlockHeaders) == 0 {
|
||||
// The syncer should have sent a done message if the search completed, and not an empty list
|
||||
errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
|
||||
return
|
||||
}
|
||||
|
||||
blockHeadersMessageChan <- blockHeadersMessage
|
||||
|
||||
@@ -312,16 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
select {
|
||||
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
// If the highHash has not been received, the peer is misbehaving
|
||||
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"highHash block %s from peer %s during block download", highHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
for _, header := range ibdBlocksMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
@@ -338,11 +414,70 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
// Finished downloading syncer selected tip blocks,
|
||||
// check if we already have the triggering relayBlockHash
|
||||
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
// Send a special header request for the selected tip anticone. This is expected to
|
||||
be a small set, as it is bounded by the size of virtual's mergeset.
|
||||
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
_, anticoneDone, err = flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
for _, header := range anticoneHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
// If the relayBlockHash has still not been received, the peer is misbehaving
|
||||
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestAnticone(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
|
||||
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(
|
||||
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
||||
@@ -381,7 +516,7 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
|
||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||
return nil
|
||||
}
|
||||
_, err = consensus.ValidateAndInsertBlock(block, false)
|
||||
err = consensus.ValidateAndInsertBlock(block, false)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||
@@ -459,7 +594,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
|
||||
|
||||
receivedChunkCount++
|
||||
if receivedChunkCount%ibdBatchSize == 0 {
|
||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
receivedChunkCount, receivedUTXOCount)
|
||||
|
||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
||||
@@ -511,6 +646,12 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
|
||||
progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
|
||||
highestProcessedDAAScore := lowBlockHeader.DAAScore()
|
||||
|
||||
// If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs.
|
||||
updateVirtual, err := flow.Domain().Consensus().IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
if offset+ibdBatchSize < len(hashes) {
|
||||
@@ -547,7 +688,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
|
||||
return err
|
||||
}
|
||||
|
||||
virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
|
||||
err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual)
|
||||
if err != nil {
|
||||
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
|
||||
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
|
||||
@@ -555,7 +696,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
|
||||
}
|
||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
||||
}
|
||||
err = flow.OnNewBlock(block, virtualChangeSet)
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -566,7 +707,15 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
|
||||
progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
|
||||
}
|
||||
|
||||
return flow.resolveVirtual(highestProcessedDAAScore)
|
||||
// We need to resolve virtual only if it wasn't updated while syncing block bodies
|
||||
if !updateVirtual {
|
||||
err := flow.resolveVirtual(highestProcessedDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return flow.OnNewBlockTemplate()
|
||||
}
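syncMissingBlockBodies walks the missing hashes in ibdBatchSize-sized windows, turning each window into one block request. A compact sketch of that slicing pattern; the batch size and hash strings are illustrative.

package main

import "fmt"

const batchSize = 4 // stands in for ibdBatchSize

func main() {
	hashes := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}

	for offset := 0; offset < len(hashes); offset += batchSize {
		end := offset + batchSize
		if end > len(hashes) {
			end = len(hashes)
		}
		batch := hashes[offset:end]
		// In the real flow each batch becomes one request message to the peer.
		fmt.Println("requesting batch:", batch)
	}
}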
|
||||
|
||||
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
|
||||
@@ -579,38 +728,24 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
|
||||
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
|
||||
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
|
||||
var percents int
|
||||
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
|
||||
percents = 100
|
||||
} else {
|
||||
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
|
||||
}
|
||||
if percents < 0 {
|
||||
percents = 0
|
||||
} else if percents > 100 {
|
||||
percents = 100
|
||||
}
|
||||
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
if i%10 == 0 {
|
||||
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var percents int
|
||||
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
|
||||
percents = 100
|
||||
} else {
|
||||
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
|
||||
}
|
||||
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
|
||||
}
|
||||
virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.OnVirtualChange(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isCompletelyResolved {
|
||||
log.Infof("Resolved virtual")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
log.Infof("Resolved virtual")
|
||||
return nil
|
||||
}
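The ResolveVirtual progress callback above turns DAA scores into a percentage and clamps it to [0, 100], since the target score is only an estimate. A standalone sketch of that computation.

package main

import "fmt"

// progressPercent mirrors the callback above: the share of the estimated DAA
// score range resolved so far, clamped because the target is only a hint.
func progressPercent(start, current, estimatedTarget uint64) int {
	if estimatedTarget <= start {
		return 100
	}
	if current <= start {
		return 0
	}
	percents := int(float64(current-start) / float64(estimatedTarget-start) * 100)
	if percents > 100 {
		percents = 100
	}
	return percents
}

func main() {
	fmt.Println(progressPercent(1000, 1500, 2000)) // 50
	fmt.Println(progressPercent(1000, 2500, 2000)) // 100 (target was only an estimate)
}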
|
||||
@@ -10,6 +10,10 @@ type ibdProgressReporter struct {
|
||||
}
|
||||
|
||||
func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
|
||||
if highDAAScore <= lowDAAScore {
|
||||
// Avoid a zero or negative diff
|
||||
highDAAScore = lowDAAScore + 1
|
||||
}
|
||||
return &ibdProgressReporter{
|
||||
lowDAAScore: lowDAAScore,
|
||||
highDAAScore: highDAAScore,
|
||||
@@ -23,7 +27,16 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
|
||||
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
|
||||
ipr.processed += processedDelta
|
||||
|
||||
relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
|
||||
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
|
||||
if highestProcessedDAAScore > ipr.highDAAScore {
|
||||
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
|
||||
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
|
||||
}
|
||||
relativeDAAScore := uint64(0)
|
||||
if highestProcessedDAAScore > ipr.lowDAAScore {
|
||||
// Avoid a negative diff
|
||||
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
|
||||
}
|
||||
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
|
||||
if progressPercent > ipr.lastReportedProgressPercent {
|
||||
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
|
||||
@@ -12,18 +12,20 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
err := flow.Domain().InitStagingConsensus()
|
||||
func (flow *handleIBDFlow) ibdWithHeadersProof(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
err := flow.Domain().InitStagingConsensusWithoutGenesis()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
|
||||
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
if !flow.IsRecoverableError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err)
|
||||
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
|
||||
if deleteStagingConsensusErr != nil {
|
||||
return deleteStagingConsensusErr
|
||||
@@ -32,6 +34,8 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
|
||||
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
|
||||
err = flow.Domain().CommitStagingConsensus()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -45,11 +49,34 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
|
||||
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
|
||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
|
||||
relayBlock *externalapi.DomainBlock,
|
||||
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
|
||||
|
||||
if !highestSharedBlockFound {
|
||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
|
||||
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
|
||||
if highestKnownSyncerChainHash != nil {
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
highestSharedBlockFound = blockInfo.HasBody()
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
|
||||
pruningPoint, highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
|
||||
// we might have here info which is relevant to finality conflict decisions. This should be taken into
|
||||
// account when we improve this aspect.
|
||||
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
|
||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
@@ -58,28 +85,33 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *ex
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
if highestKnownSyncerChainHash == nil {
|
||||
log.Infof("Stopping IBD since IBD from this node will cause a finality conflict")
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
|
||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
|
||||
virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
||||
return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil
|
||||
}
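checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore combines two conditions: the relayed block must sit at least a pruning depth above the virtual selected tip in blue score, and it must carry strictly more blue work. A hedged sketch of just that predicate; the pruning depth used in main is illustrative, not the real network parameter.

package main

import (
	"fmt"
	"math/big"
)

// shouldDownloadHeadersProof mirrors the check above: the relayed block has to
// be at least pruningDepth blue-score units ahead of our virtual selected tip
// AND carry strictly more blue work than it.
func shouldDownloadHeadersProof(relayBlueScore, tipBlueScore, pruningDepth uint64,
	relayBlueWork, tipBlueWork *big.Int) bool {

	if relayBlueScore < tipBlueScore+pruningDepth {
		return false
	}
	return relayBlueWork.Cmp(tipBlueWork) > 0
}

func main() {
	pruningDepth := uint64(185_798) // illustrative value only
	fmt.Println(shouldDownloadHeadersProof(
		500_000, 100_000, pruningDepth,
		big.NewInt(2_000_000), big.NewInt(1_000_000))) // true
	fmt.Println(shouldDownloadHeadersProof(
		150_000, 100_000, pruningDepth,
		big.NewInt(2_000_000), big.NewInt(1_000_000))) // false: too close to the tip
}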
|
||||
|
||||
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||
@@ -114,7 +146,10 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
|
||||
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
|
||||
highBlockDAAScore uint64) error {
|
||||
|
||||
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -131,19 +166,20 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalap
|
||||
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
|
||||
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Headers downloaded from peer %s", flow.peer)
|
||||
|
||||
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
|
||||
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !highHashInfo.Exists {
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
|
||||
}
|
||||
|
||||
@@ -206,7 +242,8 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -220,9 +257,19 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
		// We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
		// the pruning point outside the loop so we use i+2 instead of i+1.
		if (i+2)%ibdBatchSize == 0 {
			log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
			err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
			if err != nil {
				return err
			}
		}
	}
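A minimal standalone sketch of the batching arithmetic above (not kaspad code; ibdBatchSize is assumed to be 99 here purely for illustration): because the pruning point itself was downloaded before the loop, anticone block i (0-based) is overall block i+2, so a request for the next batch is sent exactly when a batch boundary is crossed.

package main

import "fmt"

const ibdBatchSize = 99 // assumed value, for illustration only

func main() {
	for i := 0; i < 250; i++ {
		// i+2 counts the pruning point plus the i+1 anticone blocks received so far.
		if (i+2)%ibdBatchSize == 0 {
			fmt.Printf("after %d anticone blocks, ask the peer for the next batch\n", i+1)
		}
	}
}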
|
||||
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
|
||||
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -243,8 +290,14 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
|
||||
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
|
||||
}
|
||||
|
||||
	_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
	return err
	err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
	if err != nil {
		if errors.As(err, &ruleerrors.RuleError{}) {
			return protocolerrors.Wrapf(true, err, "failed validating block with trusted data")
		}
		return err
	}
	return nil
}
|
||||
func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) {
|
||||
@@ -344,6 +397,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
|
||||
log.Info("Fetching the pruning point UTXO set")
|
||||
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
||||
if err != nil {
|
||||
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package ping
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -61,6 +63,9 @@ func (flow *sendPingsFlow) start() error {
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
if errors.Is(err, router.ErrTimeout) {
|
||||
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
pongMessage := message.(*appmessage.MsgPong)
|
||||
@@ -1,14 +1,14 @@
|
||||
package v4
|
||||
package v5
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
@@ -78,6 +78,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
|
||||
appmessage.CmdPruningPointProof,
|
||||
appmessage.CmdTrustedData,
|
||||
appmessage.CmdIBDChainBlockLocator,
|
||||
},
|
||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleIBD(m.Context(), incomingRoute,
|
||||
@@ -121,7 +122,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
@@ -134,6 +135,20 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestAnticone", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointProofRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
@@ -1,7 +1,7 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
|
||||
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
|
||||
OnTransactionAddedToMempool()
|
||||
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
|
||||
IsIBDRunning() bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type handleRelayedTransactionsFlow struct {
|
||||
@@ -50,7 +50,12 @@ func (flow *handleRelayedTransactionsFlow) start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.IsIBDRunning() {
|
||||
isNearlySynced, err := flow.IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Transaction relay is disabled if the node is out of sync and thus not mining
|
||||
if !isNearlySynced {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -97,7 +102,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
|
||||
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
|
||||
// Ask the transaction memory pool if the transaction is known
|
||||
// to it in any form (main pool or orphan).
|
||||
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
|
||||
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package transactionrelay_test
|
||||
import (
|
||||
"errors"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
|
||||
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
|
||||
}
|
||||
|
||||
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
|
||||
return false
|
||||
func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
|
||||
@@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
||||
}
|
||||
|
||||
for _, transactionID := range msgRequestTransactions.IDs {
|
||||
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
|
||||
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
|
||||
|
||||
if !ok {
|
||||
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
|
||||
@@ -40,7 +40,6 @@ func (flow *handleRequestedTransactionsFlow) start() error {
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -2,7 +2,7 @@ package transactionrelay_test
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -2,10 +2,11 @@ package protocol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
@@ -90,14 +91,9 @@ func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-c
|
||||
return <-errChan
|
||||
}
|
||||
|
||||
// SetOnVirtualChange sets the onVirtualChangeHandler handler
|
||||
func (m *Manager) SetOnVirtualChange(onVirtualChangeHandler flowcontext.OnVirtualChangeHandler) {
|
||||
m.context.SetOnVirtualChangeHandler(onVirtualChangeHandler)
|
||||
}
|
||||
|
||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
||||
func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowcontext.OnBlockAddedToDAGHandler) {
|
||||
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
|
||||
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
|
||||
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
|
||||
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
|
||||
}
|
||||
|
||||
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
||||
@@ -110,12 +106,6 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
|
||||
m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler)
|
||||
}
|
||||
|
||||
// ShouldMine returns whether it's ok to use block template from this node
|
||||
// for mining purposes.
|
||||
func (m *Manager) ShouldMine() (bool, error) {
|
||||
return m.context.ShouldMine()
|
||||
}
|
||||
|
||||
// IsIBDRunning returns true if IBD is currently marked as running
|
||||
func (m *Manager) IsIBDRunning() bool {
|
||||
return m.context.IsIBDRunning()
|
||||
|
||||
@@ -3,7 +3,7 @@ package protocol
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
|
||||
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
@@ -23,7 +23,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
// errChan is used by the flow goroutines to return to runFlows when an error occurs.
|
||||
// They are both initialized here and passed to register flows.
|
||||
isStopping := uint32(0)
|
||||
errChan := make(chan error)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router)
|
||||
|
||||
@@ -76,8 +76,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
var flows []*common.Flow
|
||||
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
|
||||
switch peer.ProtocolVersion() {
|
||||
case 4:
|
||||
flows = v4.Register(m, router, errChan, &isStopping)
|
||||
case 5:
|
||||
flows = v5.Register(m, router, errChan, &isStopping)
|
||||
default:
|
||||
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Manager is an RPC manager
|
||||
@@ -28,6 +29,7 @@ func NewManager(
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
utxoIndex *utxoindex.UTXOIndex,
|
||||
consensusEventsChan chan externalapi.ConsensusEvent,
|
||||
shutDownChan chan<- struct{}) *Manager {
|
||||
|
||||
manager := Manager{
|
||||
@@ -44,50 +46,90 @@ func NewManager(
|
||||
}
|
||||
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
|
||||
|
||||
manager.initConsensusEventsHandler(consensusEventsChan)
|
||||
|
||||
return &manager
|
||||
}
|
||||
|
||||
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||
func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
||||
func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) {
|
||||
spawn("consensusEventsHandler", func() {
|
||||
for {
|
||||
consensusEvent, ok := <-consensusEventsChan
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
switch event := consensusEvent.(type) {
|
||||
case *externalapi.VirtualChangeSet:
|
||||
err := m.notifyVirtualChange(event)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
case *externalapi.BlockAdded:
|
||||
err := m.notifyBlockAddedToDAG(event.Block)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
default:
|
||||
panic(errors.Errorf("Got event of unsupported type %T", consensusEvent))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||
func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG")
|
||||
defer onEnd()
|
||||
|
||||
err := m.NotifyVirtualChange(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
	// Before converting the block and populating it, we check if any listeners are interested.
	// This is done since most nodes do not use this event.
	if !m.context.NotificationManager.HasBlockAddedListeners() {
		return nil
	}
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
|
||||
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
|
||||
err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
|
||||
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
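The early return above avoids converting and populating the block when no RPC client has subscribed to BlockAdded. A small self-contained sketch of that gating pattern (hypothetical names, not kaspad's API): the expensive payload is built only if someone will actually receive it.

package main

import "fmt"

type listener struct{ wantsBlockAdded bool }

type notifier struct{ listeners []listener }

func (n *notifier) hasBlockAddedListeners() bool {
	for _, l := range n.listeners {
		if l.wantsBlockAdded {
			return true
		}
	}
	return false
}

// onBlockAdded builds the expensive payload only when at least one listener wants it.
func (n *notifier) onBlockAdded(build func() string) {
	if !n.hasBlockAddedListeners() {
		return
	}
	fmt.Println("notify:", build())
}

func main() {
	n := &notifier{listeners: []listener{{wantsBlockAdded: false}}}
	n.onBlockAdded(func() string { return "verbose block" }) // skipped: no subscribers
}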
|
||||
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
||||
// notifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||
func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
|
||||
defer onEnd()
|
||||
|
||||
if m.context.Config.UTXOIndex {
|
||||
if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil {
|
||||
err := m.notifyUTXOsChanged(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := m.notifyVirtualSelectedParentBlueScoreChanged()
|
||||
err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = m.notifyVirtualDaaScoreChanged()
|
||||
err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if virtualChangeSet.VirtualSelectedParentChainChanges == nil ||
|
||||
(len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 &&
|
||||
len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -96,6 +138,13 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the manager that a new
|
||||
// block template is available for miners
|
||||
func (m *Manager) NotifyNewBlockTemplate() error {
|
||||
notification := appmessage.NewNewBlockTemplateNotificationMessage()
|
||||
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
||||
// resets due to pruning point change via IBD.
|
||||
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
||||
@@ -138,6 +187,7 @@ func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChange
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges)
|
||||
}
|
||||
|
||||
@@ -153,33 +203,18 @@ func (m *Manager) notifyPruningPointUTXOSetOverride() error {
|
||||
return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride()
|
||||
}
|
||||
|
||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged() error {
|
||||
func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged")
|
||||
defer onEnd()
|
||||
|
||||
virtualSelectedParent, err := m.context.Domain.Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockInfo, err := m.context.Domain.Consensus().GetBlockInfo(virtualSelectedParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(blockInfo.BlueScore)
|
||||
notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore)
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification)
|
||||
}
|
||||
|
||||
func (m *Manager) notifyVirtualDaaScoreChanged() error {
|
||||
func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged")
|
||||
defer onEnd()
|
||||
|
||||
virtualDAAScore, err := m.context.Domain.Consensus().GetVirtualDAAScore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore)
|
||||
return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification)
|
||||
}
|
||||
@@ -188,10 +223,16 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
|
||||
defer onEnd()
|
||||
|
||||
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
virtualChangeSet.VirtualSelectedParentChainChanges)
|
||||
if err != nil {
|
||||
return err
|
||||
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
|
||||
|
||||
if hasListeners {
|
||||
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
||||
}
|
||||
return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -48,6 +48,9 @@ var handlers = map[appmessage.MessageCommand]handler{
|
||||
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
|
||||
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
|
||||
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
|
||||
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
|
||||
appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply,
|
||||
appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses,
|
||||
}
|
||||
|
||||
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
||||
|
||||
@@ -3,12 +3,14 @@ package rpccontext
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
)
|
||||
|
||||
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
|
||||
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
|
||||
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
|
||||
selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
||||
selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) (
|
||||
*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
|
||||
|
||||
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
|
||||
for i, removed := range selectedParentChainChanges.Removed {
|
||||
@@ -20,5 +22,58 @@ func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotifi
|
||||
addedChainBlocks[i] = added.String()
|
||||
}
|
||||
|
||||
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, addedChainBlocks), nil
|
||||
var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs
|
||||
if includeAcceptedTransactionIDs {
|
||||
var err error
|
||||
acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage(
|
||||
removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil
|
||||
}
|
||||
|
||||
func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) (
|
||||
[]*appmessage.AcceptedTransactionIDs, error) {
|
||||
|
||||
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
|
||||
|
||||
	const chunk = 1000
	position := 0

	for position < len(selectedParentChainChanges.Added) {
		var chainBlocksChunk []*externalapi.DomainHash
		if position+chunk > len(selectedParentChainChanges.Added) {
			chainBlocksChunk = selectedParentChainChanges.Added[position:]
		} else {
			chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
		}
		// We use chunks in order to avoid blocking consensus for too long
		chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, addedChainBlock := range chainBlocksChunk {
|
||||
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
|
||||
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
|
||||
AcceptingBlockHash: addedChainBlock.String(),
|
||||
AcceptedTransactionIDs: nil,
|
||||
}
|
||||
for _, blockAcceptanceData := range chainBlockAcceptanceData {
|
||||
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
|
||||
if transactionAcceptanceData.IsAccepted {
|
||||
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
|
||||
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
|
||||
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
position += chunk
|
||||
}
|
||||
|
||||
return acceptedTransactionIDs, nil
|
||||
}
|
||||
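The chunked iteration above keeps each GetBlocksAcceptanceData call bounded so consensus is never held for a long stretch. A minimal sketch of the same slicing pattern (illustrative only, with placeholder data):

package main

import "fmt"

func main() {
	added := make([]int, 2500) // stands in for the added chain-block hashes
	const chunk = 1000
	for position := 0; position < len(added); position += chunk {
		end := position + chunk
		if end > len(added) {
			end = len(added)
		}
		// each iteration would issue one bounded consensus query for added[position:end]
		fmt.Printf("querying blocks %d..%d\n", position, end-1)
	}
}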
|
||||
@@ -44,7 +44,7 @@ func NewContext(cfg *config.Config,
|
||||
UTXOIndex: utxoIndex,
|
||||
ShutDownChan: shutDownChan,
|
||||
}
|
||||
context.NotificationManager = NewNotificationManager()
|
||||
context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams)
|
||||
|
||||
return context
|
||||
}
|
||||
|
||||
@@ -3,6 +3,11 @@ package rpccontext
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/utxoindex"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
@@ -13,6 +18,7 @@ import (
|
||||
type NotificationManager struct {
|
||||
sync.RWMutex
|
||||
listeners map[*routerpkg.Router]*NotificationListener
|
||||
params *dagconfig.Params
|
||||
}
|
||||
|
||||
// UTXOsChangedNotificationAddress represents a kaspad address.
|
||||
@@ -24,6 +30,8 @@ type UTXOsChangedNotificationAddress struct {
|
||||
|
||||
// NotificationListener represents a registered RPC notification listener
|
||||
type NotificationListener struct {
|
||||
params *dagconfig.Params
|
||||
|
||||
propagateBlockAddedNotifications bool
|
||||
propagateVirtualSelectedParentChainChangedNotifications bool
|
||||
propagateFinalityConflictNotifications bool
|
||||
@@ -32,13 +40,16 @@ type NotificationListener struct {
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
||||
propagateVirtualDaaScoreChangedNotifications bool
|
||||
propagatePruningPointUTXOSetOverrideNotifications bool
|
||||
propagateNewBlockTemplateNotifications bool
|
||||
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool
|
||||
}
|
||||
|
||||
// NewNotificationManager creates a new NotificationManager
|
||||
func NewNotificationManager() *NotificationManager {
|
||||
func NewNotificationManager(params *dagconfig.Params) *NotificationManager {
|
||||
return &NotificationManager{
|
||||
params: params,
|
||||
listeners: make(map[*routerpkg.Router]*NotificationListener),
|
||||
}
|
||||
}
|
||||
@@ -48,7 +59,7 @@ func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
listener := newNotificationListener()
|
||||
listener := newNotificationListener(nm.params)
|
||||
nm.listeners[router] = listener
|
||||
}
|
||||
|
||||
@@ -72,6 +83,19 @@ func (nm *NotificationManager) Listener(router *routerpkg.Router) (*Notification
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events
|
||||
func (nm *NotificationManager) HasBlockAddedListeners() bool {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for _, listener := range nm.listeners {
|
||||
if listener.propagateBlockAddedNotifications {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
|
||||
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
|
||||
nm.RLock()
|
||||
@@ -79,10 +103,8 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateBlockAddedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if errors.Is(err, routerpkg.ErrRouteClosed) {
|
||||
log.Warnf("Couldn't send notification: %s", err)
|
||||
} else if err != nil {
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -91,13 +113,27 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
|
||||
}
|
||||
|
||||
// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed
|
||||
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
||||
func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
|
||||
notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{
|
||||
RemovedChainBlockHashes: notification.RemovedChainBlockHashes,
|
||||
AddedChainBlockHashes: notification.AddedChainBlockHashes,
|
||||
}
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualSelectedParentChainChangedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
var err error
|
||||
|
||||
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
} else {
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -106,6 +142,31 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(notificat
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
// to include AcceptedTransactionIDs.
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {

	nm.RLock()
	defer nm.RUnlock()

	hasListeners = false
	hasListenersThatRequireAcceptedTransactionIDs = false

	for _, listener := range nm.listeners {
		if listener.propagateVirtualSelectedParentChainChangedNotifications {
			hasListeners = true
			// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
			if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
				hasListenersThatRequireAcceptedTransactionIDs = true
				break
			}
		}
	}

	return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}
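Together with notifyVirtualSelectedParentChainChanged, this two-flag check means the accepted-transaction-ID list is built only when some listener both subscribed to the notification and asked for the IDs. A hedged sketch of that two-level gating (hypothetical types, not the real API):

package main

import "fmt"

type chainListener struct {
	subscribed bool
	wantsTxIDs bool
}

func gates(listeners []chainListener) (hasListeners, needsTxIDs bool) {
	for _, l := range listeners {
		if l.subscribed {
			hasListeners = true
			if l.wantsTxIDs {
				needsTxIDs = true
				break
			}
		}
	}
	return hasListeners, needsTxIDs
}

func main() {
	ls := []chainListener{{subscribed: true, wantsTxIDs: false}}
	hasListeners, needsTxIDs := gates(ls)
	if !hasListeners {
		return // no subscribers, skip all work
	}
	if needsTxIDs {
		fmt.Println("build notification with accepted transaction IDs")
	} else {
		fmt.Println("build lightweight notification")
	}
}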
|
||||
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
|
||||
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
|
||||
nm.RLock()
|
||||
@@ -146,7 +207,10 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateUTXOsChangedNotifications {
|
||||
// Filter utxoChanges and create a notification
|
||||
notification := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
||||
notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Don't send the notification if it's empty
|
||||
if len(notification.Added) == 0 && len(notification.Removed) == 0 {
|
||||
@@ -154,7 +218,7 @@ func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOCha
|
||||
}
|
||||
|
||||
// Enqueue the notification
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
err = router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -173,7 +237,7 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged(
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -192,6 +256,25 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateVirtualDaaScoreChangedNotifications {
|
||||
err := router.OutgoingRoute().MaybeEnqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the notification manager that a new
|
||||
// block template is available for miners
|
||||
func (nm *NotificationManager) NotifyNewBlockTemplate(
|
||||
notification *appmessage.NewBlockTemplateNotificationMessage) error {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateNewBlockTemplateNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -218,18 +301,27 @@ func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNotificationListener() *NotificationListener {
|
||||
func newNotificationListener(params *dagconfig.Params) *NotificationListener {
|
||||
return &NotificationListener{
|
||||
params: params,
|
||||
|
||||
propagateBlockAddedNotifications: false,
|
||||
propagateVirtualSelectedParentChainChangedNotifications: false,
|
||||
propagateFinalityConflictNotifications: false,
|
||||
propagateFinalityConflictResolvedNotifications: false,
|
||||
propagateUTXOsChangedNotifications: false,
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
||||
propagateNewBlockTemplateNotifications: false,
|
||||
propagatePruningPointUTXOSetOverrideNotifications: false,
|
||||
}
|
||||
}
|
||||
|
||||
// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener
// includes accepted transaction IDs in its virtual-selected-parent-chain-changed notifications
func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool {
	return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications
}
|
||||
|
||||
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||
@@ -238,8 +330,9 @@ func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||
|
||||
// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications() {
|
||||
func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) {
|
||||
nl.propagateVirtualSelectedParentChainChangedNotifications = true
|
||||
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs
|
||||
}
|
||||
|
||||
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
|
||||
@@ -258,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
|
||||
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
|
||||
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
|
||||
// are ignored.
|
||||
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
||||
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||
// Apply a write-lock since the internal listener address map is modified
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
if !nl.propagateUTXOsChangedNotifications {
|
||||
nl.propagateUTXOsChangedNotifications = true
|
||||
nl.propagateUTXOsChangedNotificationAddresses =
|
||||
@@ -273,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
|
||||
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
|
||||
// changed notifications to the remote listener for the given addresses. Addresses for which
|
||||
// notifications are not currently sent are ignored.
|
||||
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
|
||||
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
|
||||
// Apply a write-lock since the internal listener address map is modified
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
if !nl.propagateUTXOsChangedNotifications {
|
||||
return
|
||||
}
|
||||
@@ -284,7 +385,7 @@ func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(address
|
||||
}
|
||||
|
||||
func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||
utxoChanges *utxoindex.UTXOChanges) *appmessage.UTXOsChangedNotificationMessage {
|
||||
utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) {
|
||||
|
||||
// As an optimization, we iterate over the smaller set (O(n)) among the two below
|
||||
// and check existence over the larger set (O(1))
|
||||
@@ -299,27 +400,64 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
|
||||
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
|
||||
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
} else if addressesSize > 0 {
|
||||
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
|
||||
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
|
||||
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
|
||||
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
|
||||
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for scriptPublicKeyString, addedPairs := range utxoChanges.Added {
|
||||
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
|
||||
notification.Added = append(notification.Added, utxosByAddressesEntries...)
|
||||
}
|
||||
		for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
			addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
			if err != nil {
				return nil, err
			}

			utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPairs)
			notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
		}
|
||||
}
|
||||
|
||||
return notification
|
||||
return notification, nil
|
||||
}
|
||||
|
||||
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
|
||||
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
|
||||
|
||||
// ignore error because it is often returned when the script is of unknown type
|
||||
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var addressString string
|
||||
if scriptType == txscript.NonStandardTy {
|
||||
addressString = ""
|
||||
} else {
|
||||
addressString = address.String()
|
||||
}
|
||||
return addressString, nil
|
||||
}
|
||||
|
||||
// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send
|
||||
@@ -334,6 +472,12 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
|
||||
nl.propagateVirtualDaaScoreChangedNotifications = true
|
||||
}
|
||||
|
||||
// PropagateNewBlockTemplateNotifications instructs the listener to send
|
||||
// new block template notifications to the remote listener
|
||||
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
|
||||
nl.propagateNewBlockTemplateNotifications = true
|
||||
}
|
||||
|
||||
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
||||
// to the remote listener.
|
||||
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
||||
|
||||
@@ -32,22 +32,6 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
|
||||
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
|
||||
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
|
||||
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
|
||||
for outpoint := range outpoints {
|
||||
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
|
||||
Address: address,
|
||||
Outpoint: &appmessage.RPCOutpoint{
|
||||
TransactionID: outpoint.TransactionID.String(),
|
||||
Index: outpoint.Index,
|
||||
},
|
||||
})
|
||||
}
|
||||
return utxosByAddressesEntries
|
||||
}
|
||||
|
||||
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
|
||||
// to UTXOsChangedNotificationAddresses
|
||||
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||
@@ -63,7 +47,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
}
|
||||
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
|
||||
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
|
||||
addresses[i] = &UTXOsChangedNotificationAddress{
|
||||
Address: addressString,
|
||||
ScriptPublicKeyString: scriptPublicKeyString,
|
||||
|
||||
@@ -122,6 +122,7 @@ func (ctx *Context) PopulateTransactionWithVerboseData(
|
||||
}
|
||||
|
||||
ctx.Domain.Consensus().PopulateMass(domainTransaction)
|
||||
|
||||
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
|
||||
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
|
||||
Hash: consensushashing.TransactionHash(domainTransaction).String(),
|
||||
|
||||
@@ -9,6 +9,14 @@ import (
|
||||
|
||||
// HandleAddPeer handles the respectively named RPC command
|
||||
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
if context.Config.SafeRPC {
|
||||
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
|
||||
response := appmessage.NewAddPeerResponseMessage()
|
||||
response.Error =
|
||||
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
|
||||
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,14 @@ import (
|
||||
|
||||
// HandleBan handles the respectively named RPC command
|
||||
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
if context.Config.SafeRPC {
|
||||
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
|
||||
response := appmessage.NewBanResponseMessage()
|
||||
response.Error =
|
||||
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
banRequest := request.(*appmessage.BanRequestMessage)
|
||||
ip := net.ParseIP(banRequest.IP)
|
||||
if ip == nil {
|
||||
|
||||
@@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
|
||||
}
|
||||
}
|
||||
|
||||
if context.Config.SafeRPC {
|
||||
const windowSizeLimit = 10000
|
||||
if windowSize > windowSizeLimit {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
response.Error =
|
||||
appmessage.RPCErrorf(
|
||||
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
|
||||
windowSize, windowSizeLimit)
|
||||
return response, nil
|
||||
}
|
||||
}
|
||||
|
||||
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
response.Error =
|
||||
appmessage.RPCErrorf(
|
||||
"Requested window size %d is larger than pruning point depth %d",
|
||||
windowSize, context.Config.ActiveNetParams.PruningDepth())
|
||||
return response, nil
|
||||
}
|
||||
|
||||
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
|
||||
if err != nil {
|
||||
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
|
||||
|
||||
@@ -22,7 +22,7 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
|
||||
balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
|
||||
if err != nil {
|
||||
rpcError := &appmessage.RPCError{}
|
||||
if !errors.As(err, rpcError) {
|
||||
if !errors.As(err, &rpcError) {
|
||||
return nil, err
|
||||
}
|
||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||
|
||||
@@ -23,7 +23,7 @@ func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router,
|
||||
|
||||
if err != nil {
|
||||
rpcError := &appmessage.RPCError{}
|
||||
if !errors.As(err, rpcError) {
|
||||
if !errors.As(err, &rpcError) {
|
||||
return nil, err
|
||||
}
|
||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||
|
||||
@@ -4,9 +4,11 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
)
|
||||
|
||||
// HandleGetBlockTemplate handles the respectively named RPC command
|
||||
@@ -15,7 +17,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
||||
|
||||
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.GetBlockResponseMessage{}
|
||||
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
@@ -25,18 +27,20 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
||||
return nil, err
|
||||
}
|
||||
|
||||
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}
|
||||
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
|
||||
|
||||
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
|
||||
templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
|
||||
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
|
||||
|
||||
isSynced, err := context.ProtocolManager.ShouldMine()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
|
||||
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !blockInfo.Exists {
|
||||
if !blockInfo.HasHeader() {
|
||||
return &appmessage.GetBlocksResponseMessage{
|
||||
Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
|
||||
}, nil
|
||||
|
||||
@@ -23,6 +23,10 @@ type fakeDomain struct {
|
||||
testapi.TestConsensus
|
||||
}
|
||||
|
||||
func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (d fakeDomain) DeleteStagingConsensus() error {
|
||||
panic("implement me")
|
||||
}
|
||||
@@ -31,7 +35,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (d fakeDomain) InitStagingConsensus() error {
|
||||
func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
||||
app/rpc/rpchandlers/get_coin_supply.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleGetCoinSupply handles the respectively named RPC command
|
||||
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
if !context.Config.UTXOIndex {
|
||||
errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response := appmessage.NewGetCoinSupplyResponseMessage(
|
||||
constants.MaxSompi,
|
||||
circulatingSompiSupply,
|
||||
)
|
||||
|
||||
return response, nil
|
||||
}
|
||||
@@ -9,10 +9,17 @@ import (
|
||||
|
||||
// HandleGetInfo handles the respectively named RPC command
|
||||
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
isNearlySynced, err := context.Domain.Consensus().IsNearlySynced()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response := appmessage.NewGetInfoResponseMessage(
|
||||
context.NetAdapter.ID().String(),
|
||||
uint64(context.Domain.MiningManager().TransactionCount()),
|
||||
uint64(context.Domain.MiningManager().TransactionCount(true, false)),
|
||||
version.Version(),
|
||||
context.Config.UTXOIndex,
|
||||
context.ProtocolManager.Context().HasPeers() && isNearlySynced,
|
||||
)
|
||||
|
||||
return response, nil
|
||||
|
||||
@@ -7,19 +7,40 @@ import (
)

// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
    transactions := context.Domain.MiningManager().AllTransactions()
    entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
    for _, transaction := range transactions {
        rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
        err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
        if err != nil {
            return nil, err
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
    getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage)

    entries := make([]*appmessage.MempoolEntry, 0)

    transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)

    if !getMempoolEntriesRequest.FilterTransactionPool {
        for _, transaction := range transactionPoolTransactions {
            rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
            err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
            if err != nil {
                return nil, err
            }
            entries = append(entries, &appmessage.MempoolEntry{
                Fee:         transaction.Fee,
                Transaction: rpcTransaction,
                IsOrphan:    false,
            })
        }
    }
    if getMempoolEntriesRequest.IncludeOrphanPool {
        for _, transaction := range orphanPoolTransactions {
            rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
            err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
            if err != nil {
                return nil, err
            }
            entries = append(entries, &appmessage.MempoolEntry{
                Fee:         transaction.Fee,
                Transaction: rpcTransaction,
                IsOrphan:    true,
            })
        }
        entries = append(entries, &appmessage.MempoolEntry{
            Fee:         transaction.Fee,
            Transaction: rpcTransaction,
        })
    }

    return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
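Since each returned entry now carries an IsOrphan flag, a caller that previously saw only transaction-pool entries can separate the two pools itself. A minimal sketch with a hypothetical local entry type (not the real appmessage.MempoolEntry) follows.

package main

import "fmt"

// mempoolEntry is a hypothetical reduction of the RPC entry: only the fee
// and the orphan flag matter for this example.
type mempoolEntry struct {
    fee      uint64
    isOrphan bool
}

// splitByOrphanStatus partitions entries into transaction-pool and orphan-pool slices.
func splitByOrphanStatus(entries []mempoolEntry) (pool, orphans []mempoolEntry) {
    for _, entry := range entries {
        if entry.isOrphan {
            orphans = append(orphans, entry)
        } else {
            pool = append(pool, entry)
        }
    }
    return pool, orphans
}

func main() {
    entries := []mempoolEntry{{fee: 1000, isOrphan: false}, {fee: 2500, isOrphan: true}}
    pool, orphans := splitByOrphanStatus(entries)
    fmt.Printf("%d in transaction pool, %d orphans\n", len(pool), len(orphans))
}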
app/rpc/rpchandlers/get_mempool_entries_by_addresses.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package rpchandlers

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/domain/consensus/utils/txscript"

    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
    "github.com/kaspanet/kaspad/util"
)

// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {

    getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)

    mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)

    sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
    if err != nil {
        return nil, err
    }

    for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {

        address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
        if err != nil {
            errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
            errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
            return errorMessage, nil
        }

        sending := make([]*appmessage.MempoolEntry, 0)
        receiving := make([]*appmessage.MempoolEntry, 0)

        scriptPublicKey, err := txscript.PayToAddrScript(address)
        if err != nil {
            errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
            errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
            return errorMessage, nil
        }

        if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {

            if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                sending = append(sending, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    false,
                },
                )
            }

            if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                receiving = append(receiving, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    false,
                },
                )
            }
        }
        if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {

            if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                sending = append(sending, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    true,
                },
                )
            }

            if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
                rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
                err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
                if err != nil {
                    return nil, err
                }

                receiving = append(receiving, &appmessage.MempoolEntry{
                    Fee:         transaction.Fee,
                    Transaction: rpcTransaction,
                    IsOrphan:    true,
                },
                )
            }

        }

        if len(sending) > 0 || len(receiving) > 0 {
            mempoolEntriesByAddresses = append(
                mempoolEntriesByAddresses,
                &appmessage.MempoolEntryByAddress{
                    Address:   address.String(),
                    Sending:   sending,
                    Receiving: receiving,
                },
            )
        }
    }

    return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil
}
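The handler keys its lookups by the address's scriptPublicKey string and reports, per address, which mempool transactions spend from it and which pay to it, skipping addresses with no activity. A rough client-side view of that shape, using hypothetical local types rather than the real appmessage structs, is sketched below.

package main

import "fmt"

// addressActivity is a hypothetical, trimmed-down analogue of
// MempoolEntryByAddress: transaction IDs spending from and paying to one address.
type addressActivity struct {
    address   string
    sending   []string
    receiving []string
}

// groupByAddress mirrors the handler's shape: for every requested address,
// collect sending/receiving activity and skip addresses with no mempool activity.
func groupByAddress(addresses []string, sendingByAddress, receivingByAddress map[string][]string) []addressActivity {
    result := make([]addressActivity, 0, len(addresses))
    for _, address := range addresses {
        sending := sendingByAddress[address]
        receiving := receivingByAddress[address]
        if len(sending) == 0 && len(receiving) == 0 {
            continue
        }
        result = append(result, addressActivity{address: address, sending: sending, receiving: receiving})
    }
    return result
}

func main() {
    sending := map[string][]string{"kaspa:alice": {"txid-1"}}
    receiving := map[string][]string{"kaspa:bob": {"txid-2", "txid-3"}}
    for _, activity := range groupByAddress([]string{"kaspa:alice", "kaspa:bob", "kaspa:carol"}, sending, receiving) {
        fmt.Printf("%s: %d sending, %d receiving\n", activity.address, len(activity.sending), len(activity.receiving))
    }
}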
@@ -3,12 +3,18 @@ package rpchandlers
import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {

    transaction := &externalapi.DomainTransaction{}
    var found bool
    var isOrphan bool

    getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)

    transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)

@@ -18,17 +24,18 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
        return errorMessage, nil
    }

    transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
    if !ok {
    mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool)

    if !found {
        errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
        errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
        return errorMessage, nil
    }
    rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)

    rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
    err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
    if err != nil {
        return nil, err
    }

    return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
    return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
}
@@ -26,12 +26,14 @@ func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _
        return response, nil
    }

    chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(virtualSelectedParentChain)
    chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
        virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs)
    if err != nil {
        return nil, err
    }

    response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage(
        chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes)
        chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes,
        chainChangedNotification.AcceptedTransactionIDs)
    return response, nil
}
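The changelog notes that accepted transaction IDs were added to this response (and to the chain-changed notification) so that exchanges can track transaction confirmations. A minimal, hypothetical sketch of that bookkeeping follows; it assumes the caller only sees batches of added chain blocks plus the transaction IDs they accepted, and it ignores removed chain blocks (reorgs) for simplicity.

package main

import "fmt"

// confirmationTracker is a hypothetical helper: it counts how many chain
// blocks have been added on top of the block that accepted each watched transaction.
type confirmationTracker struct {
    watched       map[string]bool  // transaction IDs we care about
    acceptedAt    map[string]int64 // chain height at which each watched tx was accepted
    virtualHeight int64            // current selected-chain height (simplified model)
}

func newConfirmationTracker(txIDs ...string) *confirmationTracker {
    watched := make(map[string]bool, len(txIDs))
    for _, id := range txIDs {
        watched[id] = true
    }
    return &confirmationTracker{watched: watched, acceptedAt: make(map[string]int64)}
}

// applyChainUpdate consumes one chain-changed update: the number of added chain
// blocks and the transaction IDs those blocks accepted.
func (t *confirmationTracker) applyChainUpdate(addedChainBlocks int64, acceptedTxIDs []string) {
    t.virtualHeight += addedChainBlocks
    for _, id := range acceptedTxIDs {
        if t.watched[id] {
            if _, alreadySeen := t.acceptedAt[id]; !alreadySeen {
                t.acceptedAt[id] = t.virtualHeight
            }
        }
    }
}

func (t *confirmationTracker) confirmations(txID string) int64 {
    acceptedAt, ok := t.acceptedAt[txID]
    if !ok {
        return 0
    }
    return t.virtualHeight - acceptedAt + 1
}

func main() {
    tracker := newConfirmationTracker("txid-1")
    tracker.applyChainUpdate(3, []string{"txid-1"})
    tracker.applyChainUpdate(5, nil)
    fmt.Println("confirmations for txid-1:", tracker.confirmations("txid-1"))
}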
app/rpc/rpchandlers/notify_new_block_template.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package rpchandlers

import (
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"
    "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
    listener, err := context.NotificationManager.Listener(router)
    if err != nil {
        return nil, err
    }
    listener.PropagateNewBlockTemplateNotifications()

    response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
    return response, nil
}
@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
    if err != nil {
        return nil, err
    }
    listener.PropagateUTXOsChangedNotifications(addresses)
    context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)

    response := appmessage.NewNotifyUTXOsChangedResponseMessage()
    return response, nil
@@ -7,12 +7,17 @@ import (
)

// HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command
func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router,
    request appmessage.Message) (appmessage.Message, error) {

    notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage)

    listener, err := context.NotificationManager.Listener(router)
    if err != nil {
        return nil, err
    }
    listener.PropagateVirtualSelectedParentChainChangedNotifications()
    listener.PropagateVirtualSelectedParentChainChangedNotifications(
        notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs)

    response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage()
    return response, nil
@@ -8,6 +8,14 @@ import (

// HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
    if context.Config.SafeRPC {
        log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
        response := &appmessage.ResolveFinalityConflictResponseMessage{}
        response.Error =
            appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
        return response, nil
    }

    response := &appmessage.ResolveFinalityConflictResponseMessage{}
    response.Error = appmessage.RPCErrorf("not implemented")
    return response, nil
@@ -12,6 +12,14 @@ const pauseBeforeShutDown = time.Second

// HandleShutDown handles the respectively named RPC command
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
    if context.Config.SafeRPC {
        log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
        response := appmessage.NewShutDownResponseMessage()
        response.Error =
            appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
        return response, nil
    }

    log.Warn("ShutDown RPC called.")

    // Wait a second before shutting down, to allow time to return the response to the caller
@@ -26,7 +26,7 @@ func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router
    if err != nil {
        return nil, err
    }
    listener.StopPropagatingUTXOsChangedNotifications(addresses)
    context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses)

    response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
    return response, nil
@@ -1,6 +1,7 @@
package rpchandlers

import (
    "encoding/json"
    "github.com/kaspanet/kaspad/app/appmessage"
    "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
    "github.com/kaspanet/kaspad/app/rpc/rpccontext"

@@ -14,9 +15,14 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
    submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)

    isSynced, err := context.ProtocolManager.ShouldMine()
    if err != nil {
        return nil, err
    var err error
    isSynced := false
    // The node is considered synced if it has peers and consensus state is nearly synced
    if context.ProtocolManager.Context().HasPeers() {
        isSynced, err = context.ProtocolManager.Context().IsNearlySynced()
        if err != nil {
            return nil, err
        }
    }

    if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {

@@ -58,6 +64,12 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
        return nil, err
    }

    jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ")
    if jsonBytes != nil {
        log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+
            "the full header for debug purposes: \n%s", err, string(jsonBytes))
    }

    return &appmessage.SubmitBlockResponseMessage{
        Error:        appmessage.RPCErrorf("Block rejected. Reason: %s", err),
        RejectReason: appmessage.RejectReasonBlockInvalid,
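The new guard above defines "ready to accept mined blocks" as having peers and being nearly synced; the same predicate appears in the GetBlockTemplate and GetInfo handlers. A hypothetical condensed helper (the function and its inputs are illustrative, not part of the kaspad API) could look like the following.

package main

import "fmt"

// isReadyForMining restates the guard used by the handlers above: a node
// should only accept or produce mined blocks when it has peers and its
// consensus state is nearly synced, unless the operator explicitly allows it.
func isReadyForMining(hasPeers, isNearlySynced, allowWhenNotSynced bool) bool {
    isSynced := hasPeers && isNearlySynced
    return isSynced || allowWhenNotSynced
}

func main() {
    fmt.Println(isReadyForMining(true, true, false))  // true: peers present and nearly synced
    fmt.Println(isReadyForMining(false, true, false)) // false: no peers, so the node is not considered synced
    fmt.Println(isReadyForMining(false, false, true)) // true: overridden via AllowSubmitBlockWhenNotSynced
}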
@@ -9,6 +9,14 @@ import (

// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
    if context.Config.SafeRPC {
        log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
        response := appmessage.NewUnbanResponseMessage()
        response.Error =
            appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
        return response, nil
    }

    unbanRequest := request.(*appmessage.UnbanRequestMessage)
    ip := net.ParseIP(unbanRequest.IP)
    if ip == nil {
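ResolveFinalityConflict, ShutDown and Unban all repeat the same safe-RPC-mode guard: log a warning and return an RPC error instead of acting. A hypothetical generic helper showing the shape of that guard (not code from the repository) could be:

package main

import (
    "errors"
    "fmt"
    "log"
)

// guardSafeRPC illustrates the repeated pattern: when the node runs in safe
// RPC mode, state-changing commands are refused with an error rather than executed.
func guardSafeRPC(safeRPCMode bool, commandName string) error {
    if !safeRPCMode {
        return nil
    }
    log.Printf("%s RPC command called while node in safe RPC mode -- ignoring.", commandName)
    return errors.New(commandName + " RPC command called while node in safe RPC mode")
}

func main() {
    if err := guardSafeRPC(true, "Unban"); err != nil {
        fmt.Println("refused:", err)
        return
    }
    fmt.Println("command would execute here")
}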
@@ -5,9 +5,8 @@ FLAGS=$@
go version

go get $FLAGS -t -d ./...
# This is to bypass a go bug: https://github.com/golang/go/issues/27643
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint \
    honnef.co/go/tools/cmd/staticcheck
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint
go install $FLAGS honnef.co/go/tools/cmd/staticcheck@latest

test -z "$(go fmt ./...)"
changelog.txt (199 changed lines)
@@ -1,3 +1,202 @@
Kaspad v0.12.11 - 2022-12-1
===========================

* Fix IBD sync conditions (#2174)

Kaspad v0.12.10 - 2022-11-23
===========================

* Increase devnet's initial difficulty (#2167)

Bug fixes:
* Check rule errors when validating blocks with trusted data (#2171)
* Compare blue score with selected tip when checking if a pruning point proof is needed (#2169)
* Add found to GetBlock (#2165)

Wallet new features:
* Use one of the From addresses as a change address (#2164)

Kaspad v0.12.9 - 2022-10-23
===========================

* Create directory before locking lock file (#2160)

Kaspad v0.12.8 - 2022-10-23
===========================

* Remove hard fork activation rules (#2152)
* Add lock file to kaspawallet (#2154)
* Add a new testnet DNS seeder (#2156)
* Use utxo diff algo for pruning point move and use acceptance data method only as a fall-back (#2157)
* Make more checks if status is invalid even if the block exists (#2158)

Kaspad v0.12.7 - 2022-09-21
===========================

* Security Fix + Hard fork - Full details can be seen here: https://medium.com/@michaelsuttonil/kaspa-security-patch-and-hard-fork-september-2022-12da617b0094

Kaspad v0.12.6 - 2022-09-09
===========================

* Remove tests from docker files (#2133)

Wallet new features:
* Optionally show serialized transactions on send (#2135)

Bug fixes:
* Update virtual on IBD if nearly synced (#2134)

Kaspad v0.12.5 - 2022-08-28
===========================

* Add tests for hash writers (#2120)
* Replace daglabs's dnsseeder with Wolfie's (#2119)
* Change testnet dnsseeder (#2126)
* Add RPC timeout parameter to wallet daemon (#2104)

Wallet new features:
* Add UseExistingChangeAddress option to the wallet (#2127)

Bug fixes:
* Call update pruning point if required on resolve virtual or startup (#2129)
* Add missing locks to notification listener modifications (#2124)
* Calculate pruning point utxo set from acceptance data (#2123)
* Fix RPC client memory/goroutine leak (#2122)
* Fix a subtle lock sync issue in consensus insert block (#2121)
* Mempool: Retrieve stable state of the mempool. Optimize get mempool entries by addresses (#2111)
* Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout (#2131)

Kaspad v0.12.4 - 2022-07-17
===========================

* Crucial fix for the UTXO difference mechanism (#2114)
* Implement multi-layer auto-compound (#2115)

Kaspad v0.12.3 - 2022-06-29
===========================

* Fixes a few bugs which can lead to node crashes or out-of-memory errors

Kaspad v0.12.2 - 2022-06-17
===========================

* Clarify wallet message concerning a wallet daemon sync state (#2045)
* Change the way the miner executable reports execution errors (closes issue #1677) (#2048)
* Fix kaspawallet help messages, clarify sweep command help string (#2067)
* Wallet parse/send/create commands improvement (#2024)
* Use chunks for `GetBlocksAcceptanceData` calls in order to avoid blocking consensus for too long (#2075)
* Unite multiple `GetBlockAcceptanceData` consensus calls to one (#2074)
* Update many-small-chains-and-one-big-chain DAG to not fail merge depth limit (#2072)

RPC API Changes:
* RPC: include orphans into mempool entries (#2046)
* RPC & UtxoIndex: keep track of, query and test circulating supply. (#2070)

Bug Fixes:
* Fix RPC connections counting (#2026)
* Fix UTXO diff child error (#2084)
* Fix `not in selected chain` crash (#2082)

Kaspad v0.12.1 - 2022-05-31
===========================

* Fix utxoindex synchronization bug which resulted in kaspawallet orphan tx errors (#2052, #2056, #2059)
* Add a channel mechanism for consensus events to be processed in the order they were produced (#2052, #2056, #2059)
* Block template cache improvement (#2023)
* Improved staging shard performance (#2034)
* Add finality check to ResolveVirtual (#2041)
* Update Dockerfile for go 1.18 (#2038)
* Remove HF1 activation code (#2042)

Kaspa wallet:
* Various kaspawallet text fixes and log additions (#2032, #2047, #2062)
* Wallet address synchronization improvement (#2025)
* Add support for `from` address in `kaspawallet send` (#1964)
* Make kaspawallet ignore outputs that exist in the mempool (#2053)
* Wrap the entire wallet send operation with a lock (#2063)

RPC API:
* Add "GetMempoolEntriesByAddresses" to kaspad RPC (#2022)
* Make sure RPCErrors are returned and do not crash the system (#2039)
* Add AcceptedTransactionIDs to ChainChanged notification and VirtualSelectedParentChain RPC (#2036, for exchanges to track tx confirmations)
* Allow blank address in NotifyUTXOsChanged to get all updates (#2027)
* Include isSynced and isUtxoIndexed in GetInfoResponse (#2068)

Kaspad v0.12.0 - 2022-04-14
===========================
Breaking changes:
Hard-fork at DAA score 14687583 (estimated to be on 28/04 16:38 UTC) which includes:
* Using separate depth than finality depth for merge set calculations (#2013)
* Not counting the header size as part of the block mass (#2013)
* Increasing block version to 1 (#2013)
* Removing the limit on amount of KAS that can be sent in one transaction (#2013)

Bug fixes:
* Making a workaround for the UTXO diff child bug (#2020)
* Use cosigner index 0 for read only wallets (#2014)

Non-breaking changes:
* Adding a "sweep" command to `kaspawallet` (#2018)
* Use `blue work` heuristic to skip irrelevant relay blocks
* Kaspawallet daemon: Add Send and Sign commands (#2016)

Kaspad v0.11.17 - 2022-04-06
===========================
* Decrement estimatedHeaderUpperBound from mempool's MaxBlockMass (#2009)

Kaspad v0.11.16 - 2022-04-05
===========================
* Don't skip wallet address with different cosigner index (#2007)

Kaspad v0.11.15 - 2022-04-05
===========================
* Add support for auto-compound in `kaspawallet send` (#1951)
* Unite reachability stores (#1963, #1993, #2001)
* Add names to nameless routes (#1986)
* Optimize the miner-kaspad flow and latency (#1988)
* Upgrade to go 1.18 (#1992)
* Add package name to kaspawalletd .proto file (#1991)
* Block template cache (#1994)
* Add extra data to GetBlockTemplate request (#1995, #1997)
* New definition for "out of sync" (#1996)
* Remove v4 p2p version (#1998)
* Remove increase pagefile from deploy.yaml (#2000)
* Cache the pruning point anticone (#2002)
* Add DB compaction after the deletion of a DB prefix (#2003)
* Fixed a bug in staging of pruning point by index (#2005)
* Clean up debug log level by moving many frequent logs to trace level (#2004)

Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)

Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Cleanup log output mistakes and try to be more clear to the user (#1976, #1978)
* Apply avoiding IBD logic from patch10 to p2p v4 IBD handling (#1979)

Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad

## Requirements

Go 1.16 or later.
Go 1.18 or later.

## Installation
@@ -31,10 +31,13 @@ var commandTypes = []reflect.Type{

    reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntryRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntriesRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntriesByAddressesRequest{}),

    reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),

    reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_GetBalanceByAddressRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_GetCoinSupplyRequest{}),

    reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
    reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),
@@ -1,13 +1,11 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.18-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git openssh binutils gcc musl-dev
RUN go get -u golang.org/x/lint/golint \
    honnef.co/go/tools/cmd/staticcheck

COPY go.mod .
COPY go.sum .

@@ -18,10 +16,6 @@ COPY . .

WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspactl

RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN staticcheck -checks SA4006 ./...
RUN GOOS=linux go build -a -installsuffix cgo -o kaspactl .

# --- multistage docker build: stage #2: runtime image
@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad

## Requirements

Go 1.16 or later.
Go 1.18 or later.

## Installation
@@ -13,8 +13,8 @@ const minerTimeout = 10 * time.Second
type minerClient struct {
    *rpcclient.RPCClient

    cfg                        *configFlags
    blockAddedNotificationChan chan struct{}
    cfg                              *configFlags
    newBlockTemplateNotificationChan chan struct{}
}

func (mc *minerClient) connect() error {

@@ -30,14 +30,14 @@ func (mc *minerClient) connect() error {
    mc.SetTimeout(minerTimeout)
    mc.SetLogger(backendLog, logger.LevelTrace)

    err = mc.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
    err = mc.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) {
        select {
        case mc.blockAddedNotificationChan <- struct{}{}:
        case mc.newBlockTemplateNotificationChan <- struct{}{}:
        default:
        }
    })
    if err != nil {
        return errors.Wrapf(err, "error requesting block-added notifications")
        return errors.Wrapf(err, "error requesting new-block-template notifications")
    }

    log.Infof("Connected to %s", rpcAddress)

@@ -47,8 +47,8 @@ func (mc *minerClient) connect() error {

func newMinerClient(cfg *configFlags) (*minerClient, error) {
    minerClient := &minerClient{
        cfg:                        cfg,
        blockAddedNotificationChan: make(chan struct{}),
        cfg:                              cfg,
        newBlockTemplateNotificationChan: make(chan struct{}),
    }

    err := minerClient.connect()
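The miner forwards each new-block-template notification into a channel with a select/default send, so a slow template loop never stalls the RPC callback and bursts of notifications collapse into at most one pending wake-up. The self-contained sketch below shows the same coalescing idea; it uses a one-slot buffered channel (the real kaspaminer uses an unbuffered channel with an always-ready receiver loop), and all names are illustrative.

package main

import (
    "fmt"
    "time"
)

func main() {
    // One-slot buffer plus a non-blocking send: if a signal is already
    // pending, additional notifications are simply dropped.
    newTemplateSignal := make(chan struct{}, 1)

    notify := func() {
        select {
        case newTemplateSignal <- struct{}{}:
        default: // a signal is already pending; drop this one
        }
    }

    // Simulate a burst of notifications arriving faster than they are consumed.
    for i := 0; i < 5; i++ {
        notify()
    }

    // The consumer wakes up once and fetches a fresh template covering the whole burst.
    select {
    case <-newTemplateSignal:
        fmt.Println("fetching a new block template")
    case <-time.After(time.Second):
        fmt.Println("no new template signal")
    }
}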
@@ -1,13 +1,11 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.18-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git openssh binutils gcc musl-dev
RUN go get -u golang.org/x/lint/golint \
    honnef.co/go/tools/cmd/staticcheck

COPY go.mod .
COPY go.sum .

@@ -17,11 +15,6 @@ RUN go mod download
COPY . .

WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer

RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN staticcheck -checks SA4006 ./...
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .

# --- multistage docker build: stage #2: runtime image
@@ -23,8 +23,7 @@ func main() {

    cfg, err := parseConfig()
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
        os.Exit(1)
        printErrorAndExit(errors.Errorf("Error parsing command-line arguments: %s", err))
    }
    defer backendLog.Close()

@@ -44,7 +43,7 @@ func main() {

    miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
    if err != nil {
        panic(errors.Wrap(err, "error decoding mining address"))
        printErrorAndExit(errors.Errorf("Error decoding mining address: %s", err))
    }

    doneChan := make(chan struct{})

@@ -61,3 +60,8 @@ func main() {
    case <-interrupt:
    }
}

func printErrorAndExit(err error) {
    fmt.Fprintf(os.Stderr, "%+v\n", err)
    os.Exit(1)
}
@@ -2,6 +2,7 @@ package main

import (
    nativeerrors "errors"
    "github.com/kaspanet/kaspad/version"
    "math/rand"
    "sync/atomic"
    "time"

@@ -187,7 +188,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S

func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) {
    getBlockTemplate := func() {
        template, err := client.GetBlockTemplate(miningAddr.String())
        template, err := client.GetBlockTemplate(miningAddr.String(), "kaspaminer-"+version.Version())
        if nativeerrors.Is(err, router.ErrTimeout) {
            log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
            reconnectErr := client.Reconnect()

@@ -217,7 +218,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan er
    ticker := time.NewTicker(tickerTime)
    for {
        select {
        case <-client.blockAddedNotificationChan:
        case <-client.newBlockTemplateNotificationChan:
            getBlockTemplate()
            ticker.Reset(tickerTime)
        case <-ticker.C:
@@ -3,19 +3,12 @@ package main

import (
    "context"
    "fmt"

    "github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
    "github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
    "github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
)

func formatKas(amount uint64) string {
    res := " "
    if amount > 0 {
        res = fmt.Sprintf("%19.8f", float64(amount)/constants.SompiPerKaspa)
    }
    return res
}

func balance(conf *balanceConfig) error {
    daemonClient, tearDown, err := client.Connect(conf.DaemonAddress)
    if err != nil {

@@ -39,12 +32,12 @@ func balance(conf *balanceConfig) error {
        println("Address Available Pending")
        println("-----------------------------------------------------------------------------------------------------------")
        for _, addressBalance := range response.AddressBalances {
            fmt.Printf("%s %s %s\n", addressBalance.Address, formatKas(addressBalance.Available), formatKas(addressBalance.Pending))
            fmt.Printf("%s %s %s\n", addressBalance.Address, utils.FormatKas(addressBalance.Available), utils.FormatKas(addressBalance.Pending))
        }
        println("-----------------------------------------------------------------------------------------------------------")
        print(" ")
    }
    fmt.Printf("Total balance, KAS %s %s%s\n", formatKas(response.Available), formatKas(response.Pending), pendingSuffix)
    fmt.Printf("Total balance, KAS %s %s%s\n", utils.FormatKas(response.Available), utils.FormatKas(response.Pending), pendingSuffix)

    return nil
}
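The diff replaces the wallet's local formatKas helper with a shared utils.FormatKas. As a reference for what such a formatter does, here is a hedged re-sketch (not the actual utils implementation): it renders a sompi amount as a fixed-width KAS value with eight decimal places, and leaves zero amounts as blank padding.

package main

import "fmt"

const sompiPerKaspa = 100_000_000 // 1 KAS = 10^8 sompi

// formatKasSketch is an illustrative analogue of the wallet's formatter:
// zero amounts render as blank padding, everything else as a fixed-width
// value with eight decimal places.
func formatKasSketch(amountSompi uint64) string {
    if amountSompi == 0 {
        return fmt.Sprintf("%19s", "")
    }
    return fmt.Sprintf("%19.8f", float64(amountSompi)/sompiPerKaspa)
}

func main() {
    fmt.Printf("[%s]\n", formatKasSketch(0))
    fmt.Printf("[%s]\n", formatKasSketch(123456789)) // 1.23456789 KAS
}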
Some files were not shown because too many files have changed in this diff.