Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-22 03:32:55 +00:00.

Compare commits

35 Commits
| SHA1 |
|---|
| ca32eb6bbf |
| 061e65be93 |
| 9df231f810 |
| 09cebe6960 |
| 7c327683d3 |
| c903a65def |
| 685c049a12 |
| 9b45e803d0 |
| cb5e9b55b7 |
| 190e725dd0 |
| 20f16cf729 |
| 4d3f504b73 |
| b5eda33488 |
| ef1a3c0dce |
| 1cedc720ac |
| 6449b03034 |
| 9f02a24e8b |
| 9b23bbcdb5 |
| b30f7309a2 |
| 1c18a49992 |
| 28d0f1ea2e |
| 3f7e482291 |
| ce4f5fcc33 |
| be3a6604d7 |
| f452531df0 |
| 13a09da848 |
| f58aeb4f9f |
| 82f0a4d74f |
| 69d90fe827 |
| c85b5d70fd |
| 1cd712a63e |
| 27ba9d0374 |
| b1229f7908 |
| 4a560f25a6 |
| dab1a881fe |
```diff
@@ -38,6 +38,10 @@ type RPCError struct {
 	Message string
 }
 
 func (err RPCError) Error() string {
 	return err.Message
 }
+
+// RPCErrorf formats according to a format specifier and returns the string
+// as an RPCError.
+func RPCErrorf(format string, args ...interface{}) *RPCError {
```
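The hunk ends at the new function's signature, so the body is outside the shown context. A self-contained sketch of the behavior the doc comment describes, assuming (an assumption, not the verbatim implementation) that the body wraps fmt.Sprintf:

```go
package main

import "fmt"

// RPCError mirrors the struct in the hunk above (sketch only).
type RPCError struct {
	Message string
}

func (err RPCError) Error() string {
	return err.Message
}

// RPCErrorf is assumed here to wrap fmt.Sprintf; the real body is outside
// the diff context shown above.
func RPCErrorf(format string, args ...interface{}) *RPCError {
	return &RPCError{Message: fmt.Sprintf(format, args...)}
}

func main() {
	err := RPCErrorf("method %s is not supported", "getFoo")
	fmt.Println(err) // method getFoo is not supported
}
```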
```diff
@@ -150,6 +150,8 @@ const (
 	CmdNotifyVirtualDaaScoreChangedRequestMessage
 	CmdNotifyVirtualDaaScoreChangedResponseMessage
 	CmdVirtualDaaScoreChangedNotificationMessage
+	CmdGetBalancesByAddressesRequestMessage
+	CmdGetBalancesByAddressesResponseMessage
 )
 
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -274,6 +276,8 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdNotifyVirtualDaaScoreChangedRequestMessage:  "NotifyVirtualDaaScoreChangedRequest",
 	CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse",
 	CmdVirtualDaaScoreChangedNotificationMessage:   "VirtualDaaScoreChangedNotification",
+	CmdGetBalancesByAddressesRequestMessage:        "GetBalancesByAddressesRequest",
+	CmdGetBalancesByAddressesResponseMessage:       "GetBalancesByAddressesResponse",
 }
 
 // Message is an interface that describes a kaspa message. A type that
```
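The two new commands are appended at the very end of the const block rather than inserted alphabetically. Since MessageCommand values are assigned by iota, appending keeps every existing command's wire value stable. A simplified, self-contained sketch of that pattern (toy command names, not the real set):

```go
package main

import "fmt"

type MessageCommand uint32

const (
	CmdExistingA MessageCommand = iota // wire value 0
	CmdExistingB                       // wire value 1
	// New commands go below existing ones so earlier values never shift.
	CmdGetBalancesByAddressesRequestMessage  // wire value 2
	CmdGetBalancesByAddressesResponseMessage // wire value 3
)

var commandToString = map[MessageCommand]string{
	CmdExistingA:                             "ExistingA",
	CmdExistingB:                             "ExistingB",
	CmdGetBalancesByAddressesRequestMessage:  "GetBalancesByAddressesRequest",
	CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse",
}

func main() {
	fmt.Println(commandToString[CmdGetBalancesByAddressesRequestMessage]) // GetBalancesByAddressesRequest
}
```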
app/appmessage/rpc_get_balances_by_addresses.go (new file, 47 lines)
@@ -0,0 +1,47 @@
```go
package appmessage

// GetBalancesByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesRequestMessage struct {
	baseMessage
	Addresses []string
}

// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand {
	return CmdGetBalancesByAddressesRequestMessage
}

// NewGetBalancesByAddressesRequest returns an instance of the message
func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage {
	return &GetBalancesByAddressesRequestMessage{
		Addresses: addresses,
	}
}

// BalancesByAddressesEntry represents the balance of some address
type BalancesByAddressesEntry struct {
	Address string
	Balance uint64
}

// GetBalancesByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetBalancesByAddressesResponseMessage struct {
	baseMessage
	Entries []*BalancesByAddressesEntry

	Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand {
	return CmdGetBalancesByAddressesResponseMessage
}

// NewGetBalancesByAddressesResponse returns an instance of the message
func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage {
	return &GetBalancesByAddressesResponseMessage{
		Entries: entries,
	}
}
```
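A short usage sketch for the new messages, written as it might appear in a package appmessage example test (assumes an `import "fmt"`; the address string is a placeholder, and the RPC transport that carries the request to the node is outside this diff):

```go
func ExampleNewGetBalancesByAddressesRequest() {
	request := NewGetBalancesByAddressesRequest([]string{"kaspa:exampleaddress"})
	fmt.Println(request.Command() == CmdGetBalancesByAddressesRequestMessage)

	response := NewGetBalancesByAddressesResponse([]*BalancesByAddressesEntry{
		{Address: "kaspa:exampleaddress", Balance: 500_000_000}, // balance in sompi
	})
	for _, entry := range response.Entries {
		fmt.Printf("%s: %d\n", entry.Address, entry.Balance)
	}
	// Output:
	// true
	// kaspa:exampleaddress: 500000000
}
```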
```diff
@@ -92,11 +92,14 @@ type RPCBlockLevelParents struct {
 
 // RPCBlockVerboseData holds verbose data about a block
 type RPCBlockVerboseData struct {
-	Hash               string
-	Difficulty         float64
-	SelectedParentHash string
-	TransactionIDs     []string
-	IsHeaderOnly       bool
-	BlueScore          uint64
-	ChildrenHashes     []string
+	Hash                string
+	Difficulty          float64
+	SelectedParentHash  string
+	TransactionIDs      []string
+	IsHeaderOnly        bool
+	BlueScore           uint64
+	ChildrenHashes      []string
+	MergeSetBluesHashes []string
+	MergeSetRedsHashes  []string
+	IsChainBlock        bool
 }
```
```diff
@@ -18,7 +18,7 @@ var (
 
 	// minAcceptableProtocolVersion is the lowest protocol version that a
 	// connected peer may support.
-	minAcceptableProtocolVersion = uint32(3)
+	minAcceptableProtocolVersion = uint32(4)
 
 	maxAcceptableProtocolVersion = uint32(4)
 )
```
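With this bump, the node only accepts peers speaking exactly protocol version 4. A hypothetical helper showing the acceptance range the two constants imply (the actual handshake check lives elsewhere in the protocol package):

```go
package main

import "fmt"

// isAcceptableProtocolVersion mirrors the range check implied by the
// constants above (hypothetical helper, not the real handshake code).
func isAcceptableProtocolVersion(peerVersion uint32) bool {
	const (
		minAcceptableProtocolVersion = uint32(4)
		maxAcceptableProtocolVersion = uint32(4)
	)
	return peerVersion >= minAcceptableProtocolVersion &&
		peerVersion <= maxAcceptableProtocolVersion
}

func main() {
	fmt.Println(isAcceptableProtocolVersion(3)) // false: v3 peers are now rejected
	fmt.Println(isAcceptableProtocolVersion(4)) // true
}
```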
@@ -1,33 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error {
	msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit)
	return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}

func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvRelayBlock:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgBlockLocator:
			return message.BlockLocatorHashes, nil
		default:
			return nil,
				protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
		}
	}
}
```
@@ -1,86 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
type HandleIBDBlockLocatorContext interface {
	Domain() domain.Domain
}

// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
// the highest known block that's in the selected parent chain of `targetHash` to the
// requesting peer.
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)

		targetHash := ibdBlockLocatorMessage.TargetHash
		log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)

		blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
		if err != nil {
			return err
		}
		if !blockInfo.Exists {
			return protocolerrors.Errorf(true, "received IBDBlockLocator "+
				"with an unknown targetHash %s", targetHash)
		}

		foundHighestHashInTheSelectedParentChainOfTargetHash := false
		for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
			if err != nil {
				return err
			}

			// The IBD block locator is checking only existing blocks with bodies.
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				continue
			}

			isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
				context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
			if err != nil {
				return err
			}
			if !isBlockLocatorHashInSelectedParentChainOfHighHash {
				continue
			}

			foundHighestHashInTheSelectedParentChainOfTargetHash = true
			log.Debugf("Found a known hash %s amongst peer %s's "+
				"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)

			ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
			if err != nil {
				return err
			}
			break
		}

		if !foundHighestHashInTheSelectedParentChainOfTargetHash {
			log.Warnf("no hash was found in the blockLocator "+
				"that was in the selected parent chain of targetHash %s", targetHash)

			ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound()
			err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage)
			if err != nil {
				return err
			}
		}
	}
}
```
@@ -1,54 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
type HandleIBDBlockRequestsContext interface {
	Domain() domain.Domain
}

// HandleIBDBlockRequests listens to appmessage.MsgRequestIBDBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
		log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
		for i, hash := range msgRequestIBDBlocks.Hashes {
			// Fetch the block from the database.
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
			if err != nil {
				return err
			}
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				return protocolerrors.Errorf(true, "block %s not found", hash)
			}
			block, err := context.Domain().Consensus().GetBlock(hash)
			if err != nil {
				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
			}

			// TODO (Partial nodes): Convert block to partial block if needed

			blockMessage := appmessage.DomainBlockToMsgBlock(block)
			ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
			err = outgoingRoute.Enqueue(ibdBlockMessage)
			if err != nil {
				return err
			}
			log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes))
		}
	}
}
```
@@ -1,95 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"runtime"
	"sync/atomic"
)

// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
type PruningPointAndItsAnticoneRequestsContext interface {
	Domain() domain.Domain
}

var isBusy uint32

// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		err := func() error {
			_, err := incomingRoute.Dequeue()
			if err != nil {
				return err
			}

			if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
				return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
			}
			defer atomic.StoreUint32(&isBusy, 0)

			log.Debugf("Got request for pruning point and its anticone from %s", peer)

			pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
			if err != nil {
				return err
			}

			msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
			for i, header := range pruningPointHeaders {
				msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
			if err != nil {
				return err
			}

			pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
			if err != nil {
				return err
			}

			for _, blockHash := range pointAndItsAnticone {
				err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
				if err != nil {
					return err
				}
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
			if err != nil {
				return err
			}

			log.Debugf("Sent pruning point and its anticone to %s", peer)
			return nil
		}()
		if err != nil {
			return err
		}
	}
}

func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
	blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
	if err != nil {
		return err
	}

	err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
	if err != nil {
		return err
	}

	runtime.GC()

	return nil
}
```
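The atomic.CompareAndSwapUint32 guard above rejects concurrent anticone requests outright instead of queueing them, since building the response is expensive. A self-contained sketch of that busy-flag pattern:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var isBusy uint32

// handleExpensiveRequest mirrors the guard above: a single atomic flag
// rejects concurrent expensive requests instead of queueing them.
func handleExpensiveRequest() error {
	if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
		return errors.New("node is busy with another request")
	}
	defer atomic.StoreUint32(&isBusy, 0)

	// ...build and send the expensive response here...
	return nil
}

func main() {
	fmt.Println(handleExpensiveRequest()) // <nil>
}
```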
@@ -1,40 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
	Domain() domain.Domain
}

// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		_, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}

		log.Debugf("Got request for pruning point proof from %s", peer)

		pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof()
		if err != nil {
			return err
		}
		pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof)
		err = outgoingRoute.Enqueue(pruningPointProofMessage)
		if err != nil {
			return err
		}

		log.Debugf("Sent pruning point proof to %s", peer)
	}
}
```
@@ -1,53 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
	Domain() domain.Domain
}

// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		message, err := incomingRoute.Dequeue()
		if err != nil {
			return err
		}
		getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
		log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
		for _, hash := range getRelayBlocksMessage.Hashes {
			// Fetch the block from the database.
			blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
			if err != nil {
				return err
			}
			if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
				return protocolerrors.Errorf(true, "block %s not found", hash)
			}
			block, err := context.Domain().Consensus().GetBlock(hash)
			if err != nil {
				return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
			}

			// TODO (Partial nodes): Convert block to partial block if needed

			err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
			if err != nil {
				return err
			}
			log.Debugf("Relayed block with hash %s", hash)
		}
	}
}
```
@@ -1,382 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// orphanResolutionRange is the maximum amount of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details
var orphanResolutionRange uint32 = 5

// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
	Domain() domain.Domain
	Config() *config.Config
	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
	OnPruningPointUTXOSetOverride() error
	SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
	Broadcast(message appmessage.Message) error
	AddOrphan(orphanBlock *externalapi.DomainBlock)
	GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
	IsOrphan(blockHash *externalapi.DomainHash) bool
	IsIBDRunning() bool
	IsRecoverableError(err error) bool
}

type handleRelayInvsFlow struct {
	RelayInvsContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peerpkg.Peer
	invsQueue                    []*appmessage.MsgInvRelayBlock
}

// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
	peer *peerpkg.Peer) error {

	flow := &handleRelayInvsFlow{
		RelayInvsContext: context,
		incomingRoute:    incomingRoute,
		outgoingRoute:    outgoingRoute,
		peer:             peer,
		invsQueue:        make([]*appmessage.MsgInvRelayBlock, 0),
	}
	err := flow.start()
	// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
	close(peer.IBDRequestChannel())
	return err
}

func (flow *handleRelayInvsFlow) start() error {
	for {
		log.Debugf("Waiting for inv")
		inv, err := flow.readInv()
		if err != nil {
			return err
		}

		log.Debugf("Got relay inv for block %s", inv.Hash)

		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
		if err != nil {
			return err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			if blockInfo.BlockStatus == externalapi.StatusInvalid {
				return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
					inv.Hash)
			}
			log.Debugf("Block %s already exists. continuing...", inv.Hash)
			continue
		}

		isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
		if err != nil {
			return err
		}

		if flow.IsOrphan(inv.Hash) {
			if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent {
				log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", inv.Hash)
				continue
			}

			log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
			err := flow.AddOrphanRootsToQueue(inv.Hash)
			if err != nil {
				return err
			}
			continue
		}

		// Block relay is disabled during IBD
		if flow.IsIBDRunning() {
			log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
			continue
		}

		log.Debugf("Requesting block %s", inv.Hash)
		block, exists, err := flow.requestBlock(inv.Hash)
		if err != nil {
			return err
		}
		if exists {
			log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
			continue
		}

		err = flow.banIfBlockIsHeaderOnly(block)
		if err != nil {
			return err
		}

		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) {
			log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block))
			continue
		}

		log.Debugf("Processing block %s", inv.Hash)
		missingParents, virtualChangeSet, err := flow.processBlock(block)
		if err != nil {
			if errors.Is(err, ruleerrors.ErrPrunedBlock) {
				log.Infof("Ignoring pruned block %s", inv.Hash)
				continue
			}

			if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
				log.Infof("Ignoring duplicate block %s", inv.Hash)
				continue
			}
			return err
		}
		if len(missingParents) > 0 {
			log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
			err := flow.processOrphan(block)
			if err != nil {
				return err
			}
			continue
		}

		log.Debugf("Relaying block %s", inv.Hash)
		err = flow.relayBlock(block)
		if err != nil {
			return err
		}
		log.Infof("Accepted block %s via relay", inv.Hash)
		err = flow.OnNewBlock(block, virtualChangeSet)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
			consensushashing.BlockHash(block))
	}

	return nil
}

func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
	if len(flow.invsQueue) > 0 {
		var inv *appmessage.MsgInvRelayBlock
		inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
		return inv, nil
	}

	msg, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}

	inv, ok := msg.(*appmessage.MsgInvRelayBlock)
	if !ok {
		return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
			"expecting an inv message", msg.Command())
	}
	return inv, nil
}

func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
	exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash)
	if exists {
		return nil, true, nil
	}

	// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
	// clean from any pending blocks.
	defer flow.SharedRequestedBlocks().Remove(requestHash)

	getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
	err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
	if err != nil {
		return nil, false, err
	}

	msgBlock, err := flow.readMsgBlock()
	if err != nil {
		return nil, false, err
	}

	block := appmessage.MsgBlockToDomainBlock(msgBlock)
	blockHash := consensushashing.BlockHash(block)
	if !blockHash.Equal(requestHash) {
		return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
	}

	return block, false, nil
}

// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
//
// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return nil, err
		}

		switch message := message.(type) {
		case *appmessage.MsgInvRelayBlock:
			flow.invsQueue = append(flow.invsQueue, message)
		case *appmessage.MsgBlock:
			return message, nil
		default:
			return nil, errors.Errorf("unexpected message %s", message.Command())
		}
	}
}

func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) {
	blockHash := consensushashing.BlockHash(block)
	virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
		}

		missingParentsError := &ruleerrors.ErrMissingParents{}
		if errors.As(err, missingParentsError) {
			return missingParentsError.MissingParentHashes, nil, nil
		}
		log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
		return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
	}
	return nil, virtualChangeSet, nil
}

func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
	blockHash := consensushashing.BlockHash(block)
	return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}

func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error {
	blockHash := consensushashing.BlockHash(block)

	// Return if the block has been orphaned from elsewhere already
	if flow.IsOrphan(blockHash) {
		log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
		return nil
	}

	// Add the block to the orphan set if it's within orphan resolution range
	isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
	if err != nil {
		return err
	}
	if isBlockInOrphanResolutionRange {
		if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
			isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
			if err != nil {
				return err
			}

			if isGenesisVirtualSelectedParent {
				log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+
					"to the recent pruning point before normal operation can resume.", blockHash)
				return nil
			}
		}

		log.Debugf("Block %s is within orphan resolution range. "+
			"Adding it to the orphan set", blockHash)
		flow.AddOrphan(block)
		log.Debugf("Requesting block %s missing ancestors", blockHash)
		return flow.AddOrphanRootsToQueue(blockHash)
	}

	// Start IBD unless we already are in IBD
	log.Debugf("Block %s is out of orphan resolution range. "+
		"Attempting to start IBD against it.", blockHash)

	// Send the block to IBD flow via the IBDRequestChannel.
	// Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it
	select {
	case flow.peer.IBDRequestChannel() <- block:
	default:
	}
	return nil
}

func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) {
	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
	if err != nil {
		return false, err
	}

	return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
}

func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool {
	parents := block.Header.DirectParents()
	return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash)
}

// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
	err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange)
	if err != nil {
		return false, err
	}

	blockLocatorHashes, err := flow.receiveBlockLocator()
	if err != nil {
		return false, err
	}
	for _, blockLocatorHash := range blockLocatorHashes {
		blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
		if err != nil {
			return false, err
		}
		if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
			return true, nil
		}
	}
	return false, nil
}

func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
	orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
	if err != nil {
		return err
	}

	if !orphanExists {
		log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
			"probably happened because it was randomly evicted immediately after it was added.", orphan)
	}

	log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))

	invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
	for i, root := range orphanRoots {
		log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
		invMessages[i] = appmessage.NewMsgInvBlock(root)
	}

	flow.invsQueue = append(invMessages, flow.invsQueue...)
	return nil
}
```
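processOrphan above triggers IBD with a non-blocking channel send: if the IBD flow is already handling a request (or simply not draining the channel), the trigger is dropped rather than stalling block relay. A runnable sketch of that select/default idiom:

```go
package main

import "fmt"

func main() {
	// Capacity 1 mimics a request channel that may already hold a pending
	// trigger; the second send is dropped instead of blocking.
	ibdRequestChannel := make(chan string, 1)
	for _, blockHash := range []string{"block-a", "block-b"} {
		select {
		case ibdRequestChannel <- blockHash:
			fmt.Println("IBD triggered for", blockHash)
		default:
			fmt.Println("IBD already requested; dropping trigger for", blockHash)
		}
	}
}
```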
@@ -1,75 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
	Domain() domain.Domain
}

type handleRequestBlockLocatorFlow struct {
	RequestBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestBlockLocatorFlow{
		RequestBlockLocatorContext: context,
		incomingRoute:              incomingRoute,
		outgoingRoute:              outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRequestBlockLocatorFlow) start() error {
	for {
		highHash, limit, err := flow.receiveGetBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)

		locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
		if err != nil || len(locator) == 0 {
			if err != nil {
				log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
			}
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between the pruning point and %s", highHash)
		}

		err = flow.sendBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, 0, err
	}
	msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)

	return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}

func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
	msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
	err := flow.outgoingRoute.Enqueue(msgBlockLocator)
	if err != nil {
		return err
	}
	return nil
}
```
@@ -1,105 +0,0 @@
```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

const ibdBatchSize = router.DefaultMaxMessages

// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestHeadersContext interface {
	Domain() domain.Domain
}

type handleRequestHeadersFlow struct {
	RequestHeadersContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peer.Peer
}

// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	flow := &handleRequestHeadersFlow{
		RequestHeadersContext: context,
		incomingRoute:         incomingRoute,
		outgoingRoute:         outgoingRoute,
		peer:                  peer,
	}
	return flow.start()
}

func (flow *handleRequestHeadersFlow) start() error {
	for {
		lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
		if err != nil {
			return err
		}
		log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

		for !lowHash.Equal(highHash) {
			log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)

			// GetHashesBetween is a relatively heavy operation so we limit it
			// in order to avoid locking the consensus for too long
			// maxBlocks MUST be >= MergeSetSizeLimit + 1
			const maxBlocks = 1 << 10
			blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
			if err != nil {
				return err
			}
			log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)

			blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
			for i, blockHash := range blockHashes {
				blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
				if err != nil {
					return err
				}
				blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
			}

			blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
			err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
			if err != nil {
				return err
			}

			message, err := flow.incomingRoute.Dequeue()
			if err != nil {
				return err
			}
			if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
			}

			// The next lowHash is the last element in blockHashes
			lowHash = blockHashes[len(blockHashes)-1]
		}

		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
		if err != nil {
			return err
		}
	}
}

func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
	highHash *externalapi.DomainHash, err error) {

	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)

	return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}
```
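HandleRequestHeaders streams headers in bounded batches (at most maxBlocks hashes per GetHashesBetween call) and waits for an explicit MsgRequestNextHeaders between batches, so a slow peer cannot force unbounded buffering. A simplified, self-contained sketch of that request/ack pagination loop (toy data and sizes; the real message names appear only in comments):

```go
package main

import "fmt"

func main() {
	hashes := make([]int, 25)
	const maxBlocks = 10 // stands in for the 1<<10 limit above
	low := 0
	for low < len(hashes) {
		high := low + maxBlocks
		if high > len(hashes) {
			high = len(hashes)
		}
		fmt.Printf("sending headers [%d, %d)\n", low, high)
		// ...wait here for MsgRequestNextHeaders before continuing...
		low = high
	}
	fmt.Println("done: send MsgDoneHeaders")
}
```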
@@ -1,140 +0,0 @@
```go
package blockrelay

import (
	"errors"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow.
type HandleRequestPruningPointUTXOSetContext interface {
	Domain() domain.Domain
}

type handleRequestPruningPointUTXOSetFlow struct {
	HandleRequestPruningPointUTXOSetContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute,
	outgoingRoute *router.Route) error {

	flow := &handleRequestPruningPointUTXOSetFlow{
		HandleRequestPruningPointUTXOSetContext: context,
		incomingRoute:                           incomingRoute,
		outgoingRoute:                           outgoingRoute,
	}

	return flow.start()
}

func (flow *handleRequestPruningPointUTXOSetFlow) start() error {
	for {
		msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages()
		if err != nil {
			return err
		}

		err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage(
	msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

	onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow")
	defer onEnd()

	log.Debugf("Got request for pruning point UTXO set")

	return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet)
}

func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXOSetMessages() (
	*appmessage.MsgRequestPruningPointUTXOSet, error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}
	msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
	if !ok {
		// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
		return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
	}
	return msgRequestPruningPointUTXOSet, nil
}

func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
	msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error {

	// Send the UTXO set in `step`-sized chunks
	const step = 1000
	var fromOutpoint *externalapi.DomainOutpoint
	chunksSent := 0
	for {
		pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
			msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step)
		if err != nil {
			if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
				return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
			}
		}

		log.Debugf("Retrieved %d UTXOs for pruning block %s",
			len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash)

		outpointAndUTXOEntryPairs :=
			appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
		if err != nil {
			return err
		}

		finished := len(pruningPointUTXOs) < step
		if finished && chunksSent%ibdBatchSize != 0 {
			log.Debugf("Finished sending UTXOs for pruning block %s",
				msgRequestPruningPointUTXOSet.PruningPointHash)

			return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
		}

		if len(pruningPointUTXOs) > 0 {
			fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
		}
		chunksSent++

		// Wait for the peer to request more chunks every `ibdBatchSize` chunks
		if chunksSent%ibdBatchSize == 0 {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}
			_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
			if !ok {
				// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
				return protocolerrors.Errorf(false, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
			}

			if finished {
				log.Debugf("Finished sending UTXOs for pruning block %s",
					msgRequestPruningPointUTXOSet.PruningPointHash)

				return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
			}
		}
	}
}
```
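sendPruningPointUTXOSet above combines cursor pagination (resume from the last outpoint sent) with flow control (wait for an ack every ibdBatchSize chunks). A self-contained sketch of the same loop shape, with toy data and sizes:

```go
package main

import "fmt"

func main() {
	const step, ibdBatchSize = 3, 2
	utxos := []string{"op1", "op2", "op3", "op4", "op5", "op6", "op7"}

	cursor := 0 // stands in for fromOutpoint, the resume cursor
	chunksSent := 0
	for {
		end := cursor + step
		if end > len(utxos) {
			end = len(utxos)
		}
		chunk := utxos[cursor:end]
		fmt.Println("sending chunk:", chunk)

		// A short chunk means the set is exhausted.
		finished := len(chunk) < step
		if finished {
			fmt.Println("done: send MsgDonePruningPointUTXOSetChunks")
			return
		}
		cursor = end
		chunksSent++
		if chunksSent%ibdBatchSize == 0 {
			fmt.Println("waiting for MsgRequestNextPruningPointUTXOSetChunk")
		}
	}
}
```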
@@ -1,577 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// IBDContext is the interface for the context needed for the HandleIBD flow.
|
||||
type IBDContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
UnsetIBDRunning()
|
||||
IsRecoverableError(err error) bool
|
||||
}
|
||||
|
||||
type handleIBDFlow struct {
|
||||
IBDContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// HandleIBD handles IBD
|
||||
func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||
peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &handleIBDFlow{
|
||||
IBDContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) start() error {
|
||||
for {
|
||||
// Wait for IBD requests triggered by other flows
|
||||
block, ok := <-flow.peer.IBDRequestChannel()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
err := flow.runIBDIfNotRunning(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
|
||||
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
|
||||
if !wasIBDNotRunning {
|
||||
log.Debugf("IBD is already running")
|
||||
return nil
|
||||
}
|
||||
|
||||
isFinishedSuccessfully := false
|
||||
defer func() {
|
||||
flow.UnsetIBDRunning()
|
||||
flow.logIBDFinished(isFinishedSuccessfully)
|
||||
}()
|
||||
|
||||
highHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
log.Debugf("Syncing blocks up to %s", highHash)
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !shouldSync {
|
||||
return nil
|
||||
}
|
||||
|
||||
if shouldDownloadHeadersProof {
|
||||
log.Infof("Starting IBD with headers proof")
|
||||
err := flow.ibdWithHeadersProof(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced {
|
||||
isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", highHash)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncMissingBlockBodies(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Finished syncing blocks up to %s", highHash)
|
||||
isFinishedSuccessfully = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
successString := "successfully"
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
}
|
||||
|
||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) findHighestSharedBlockHash(
|
||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||
if err != nil {
|
||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
||||
"restarting with full block locator")
|
||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return blockLocator, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
// blockLocator. This method may fail because the peer and us have conflicting pruning
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
func (flow *handleIBDFlow) fetchHighestHash(
	targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {

	ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
	err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
	if err != nil {
		return nil, false, err
	}
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}
	switch message := message.(type) {
	case *appmessage.MsgIBDBlockLocatorHighestHash:
		highestHash := message.HighestHash
		log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)

		return highestHash, true, nil
	case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
		log.Debugf("Peer %s does not know any block within our blockLocator. "+
			"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
		return nil, false, nil
	default:
		return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
	}
}
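// The function above is a single request/response round trip. A minimal
// sketch of the exchange, using the message names from this codebase:
//
//	-> MsgIBDBlockLocator{targetHash, blockLocator}
//	<- MsgIBDBlockLocatorHighestHash{highestHash}       (shared block found)
//	<- MsgIBDBlockLocatorHighestHashNotFound{}          (no shared block)
//
// Any other reply is treated as a protocol violation, and the Errorf(true, ...)
// call marks the error as one that gets the peer banned.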
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
	highHash *externalapi.DomainHash) error {

	log.Infof("Downloading headers from %s", flow.peer)

	err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
	if err != nil {
		return err
	}

	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
	blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
	errChan := make(chan error)
	spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() {
		for {
			blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
			if err != nil {
				errChan <- err
				return
			}
			if doneIBD {
				close(blockHeadersMessageChan)
				return
			}

			blockHeadersMessageChan <- blockHeadersMessage

			err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
			if err != nil {
				errChan <- err
				return
			}
		}
	})

	for {
		select {
		case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
			if !ok {
				// If the highHash has not been received, the peer is misbehaving
				highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
				if err != nil {
					return err
				}
				if !highHashBlockInfo.Exists {
					return protocolerrors.Errorf(true, "did not receive "+
						"highHash block %s from peer %s during block download", highHash, flow.peer)
				}
				return nil
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
				err = flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}
		case err := <-errChan:
			return err
		}
	}
}
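// The goroutine in the function above forms a two-stage pipeline: the network
// reader keeps up to two BlockHeadersMessages buffered in
// blockHeadersMessageChan while the main loop validates and inserts the
// previous batch, so download and validation overlap. A minimal sketch of the
// same pattern, with placeholder names (Batch, receive, requestNext) that are
// not part of this codebase:
//
//	results := make(chan *Batch, 2) // capacity 2 keeps the consumer fed
//	go func() {
//		for {
//			batch, done, err := receive()
//			if err != nil {
//				errChan <- err
//				return
//			}
//			if done {
//				close(results) // closing signals normal completion
//				return
//			}
//			results <- batch
//			requestNext() // ask for more while the consumer works
//		}
//	}()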
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
	peerSelectedTipHash *externalapi.DomainHash) error {

	msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
	return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}

func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}
	switch message := message.(type) {
	case *appmessage.BlockHeadersMessage:
		return message, false, nil
	case *appmessage.MsgDoneHeaders:
		return nil, true, nil
	default:
		return nil, false,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s, got: %s",
				appmessage.CmdBlockHeaders,
				appmessage.CmdDoneHeaders,
				message.Command())
	}
}

func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	block := &externalapi.DomainBlock{
		Header:       header,
		Transactions: nil,
	}

	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
		return err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
		return nil
	}
	_, err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}

	return nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
	headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash)
	if err != nil {
		return err
	}
	headerSelectedTipTimestamp := headerSelectedTipHeader.TimeInMilliseconds()

	currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return err
	}
	currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash)
	if err != nil {
		return err
	}
	currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds()

	if headerSelectedTipTimestamp < currentSelectedTipTimestamp {
		return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+
			"tip is smaller than the current selected tip")
	}

	minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds()
	if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds {
		return protocolerrors.Errorf(false, "difference between the timestamps of "+
			"the current pruning point and the candidate pruning point is too small. Aborting IBD...")
	}
	return nil
}
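// Worked example for the check above: minTimestampDifferenceInMilliseconds is
// (10 * time.Minute).Milliseconds() = 600,000ms. If the current selected tip
// has timestamp T and the candidate header-selected tip has timestamp
// T+300,000 (only 5 minutes ahead), the difference falls below the threshold
// and IBD is aborted; the candidate chain must be at least 10 minutes ahead
// to be worth switching to. Both Errorf calls pass false, so failing this
// check does not get the peer banned.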
func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
	consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
	defer onEnd()

	receivedChunkCount := 0
	receivedUTXOCount := 0
	for {
		message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
		if err != nil {
			return false, err
		}

		switch message := message.(type) {
		case *appmessage.MsgPruningPointUTXOSetChunk:
			receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
			domainOutpointAndUTXOEntryPairs :=
				appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)

			err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
			if err != nil {
				return false, err
			}

			receivedChunkCount++
			if receivedChunkCount%ibdBatchSize == 0 {
				log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
					receivedChunkCount, receivedUTXOCount)

				requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
				err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
				if err != nil {
					return false, err
				}
			}

		case *appmessage.MsgDonePruningPointUTXOSetChunks:
			log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
			return true, nil

		case *appmessage.MsgUnexpectedPruningPoint:
			log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
				"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
			return false, nil

		default:
			return false, protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
				appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
			)
		}
	}
}
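// Flow control in the loop above: the peer streams UTXO set chunks without an
// explicit window, and the receiver acknowledges every ibdBatchSize chunks by
// enqueueing a MsgRequestNextPruningPointUTXOSetChunk. For example, with an
// illustrative ibdBatchSize of 99, a peer sending 1,000 chunks would pause
// after each 99-chunk batch until the acknowledgment arrives. (The concrete
// batch size is a constant defined elsewhere in this package; 99 here is only
// an assumed value for the example.)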
func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
	hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		// Blocks can be inserted inside the DAG during IBD if those were requested before IBD started.
		// In rare cases, all the IBD blocks might be already inserted by the time we reach this point.
		// In these cases - GetMissingBlockBodyHashes would return an empty array.
		log.Debugf("No missing block body hashes found.")
		return nil
	}

	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
			hashesToRequest = hashes[offset : offset+ibdBatchSize]
		} else {
			hashesToRequest = hashes[offset:]
		}

		err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
		if err != nil {
			return err
		}

		for _, expectedHash := range hashesToRequest {
			message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
			if err != nil {
				return err
			}

			msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
			if !ok {
				return protocolerrors.Errorf(true, "received unexpected message type. "+
					"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
			}

			block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
			blockHash := consensushashing.BlockHash(block)
			if !expectedHash.Equal(blockHash) {
				return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
			}

			err = flow.banIfBlockIsHeaderOnly(block)
			if err != nil {
				return err
			}

			virtualChangeSet, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
			if err != nil {
				if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
					log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
					continue
				}
				return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
			}
			err = flow.OnNewBlock(block, virtualChangeSet)
			if err != nil {
				return err
			}
		}
	}

	return flow.resolveVirtual()
}
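// Batching arithmetic for the request loop above: with len(hashes) = 250 and
// an illustrative ibdBatchSize of 99, the offsets are 0, 99 and 198, yielding
// the slices hashes[0:99], hashes[99:198] and hashes[198:250]. The final
// batch is shorter than ibdBatchSize, which is exactly why the else branch
// slices to the end of the array instead of to offset+ibdBatchSize.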
func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(true, "sent header of %s block where expected block with body",
			consensushashing.BlockHash(block))
	}

	return nil
}

func (flow *handleIBDFlow) resolveVirtual() error {
	for i := 0; ; i++ {
		if i%10 == 0 {
			log.Infof("Resolving virtual. This may take some time...")
		}
		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
		if err != nil {
			return err
		}

		err = flow.OnVirtualChange(virtualChangeSet)
		if err != nil {
			return err
		}

		if isCompletelyResolved {
			log.Infof("Resolved virtual")
			return nil
		}
	}
}
@@ -1,364 +0,0 @@
package blockrelay

import (
	"fmt"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/pkg/errors"
)

func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
	err := flow.Domain().InitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.downloadHeadersAndPruningUTXOSet(highHash)
	if err != nil {
		if !flow.IsRecoverableError(err) {
			return err
		}

		deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
		if deleteStagingConsensusErr != nil {
			return deleteStagingConsensusErr
		}

		return err
	}

	err = flow.Domain().CommitStagingConsensus()
	if err != nil {
		return err
	}

	err = flow.OnPruningPointUTXOSetOverride()
	if err != nil {
		return err
	}

	return nil
}
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
	highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {

	if !highestSharedBlockFound {
		hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
		if err != nil {
			return false, false, err
		}

		if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore {
			return true, true, nil
		}

		return false, false, nil
	}

	return false, true, nil
}

func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
	headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
	if err != nil {
		return false, err
	}

	headersSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(headersSelectedTip)
	if err != nil {
		return false, err
	}

	if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
		return false, nil
	}

	return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
}
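// Numeric sketch of the condition above, with purely illustrative values: if
// the local headers-selected tip has blueScore 1,000,000 and the network's
// pruning depth is 185,798, a relayed block only justifies IBD with a headers
// proof when its blueScore is at least 1,185,798 and its accumulated blueWork
// strictly exceeds the local tip's. Anything shallower is either synced
// normally (no proof) or not synced at all.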
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
	log.Infof("Downloading the pruning point proof from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof())
	if err != nil {
		return nil, err
	}
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}
	pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof)
	if !ok {
		return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
			"expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command())
	}
	pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage)
	err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof)
	if err != nil {
		if errors.As(err, &ruleerrors.RuleError{}) {
			return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed")
		}
		return nil, err
	}

	err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof)
	if err != nil {
		return nil, err
	}

	return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}
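// Note on the return value above: pruningPointProof.Headers is a
// two-dimensional slice, one slice of headers per proof level, so Headers[0]
// is the level-0 header chain. By construction of the proof, the last element
// of that chain is the proposed pruning point itself, which is why its hash
// is what gets returned here and threaded through the rest of the download.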
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash) error {
	proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
	if err != nil {
		return err
	}

	err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint)
	if err != nil {
		return err
	}

	// TODO: Remove this condition once there's more proper way to check finality violation
	// in the headers proof.
	if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) {
		return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
	}

	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash)
	if err != nil {
		return err
	}

	log.Infof("Headers downloaded from peer %s", flow.peer)

	highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
	if err != nil {
		return err
	}

	if !highHashInfo.Exists {
		return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
	}

	err = flow.validatePruningPointFutureHeaderTimestamps()
	if err != nil {
		return err
	}

	log.Debugf("Syncing the current pruning point UTXO set")
	syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint)
	if err != nil {
		return err
	}
	if !syncedPruningPointUTXOSetSuccessfully {
		log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
		return nil
	}
	log.Debugf("Finished syncing the current pruning point UTXO set")
	return nil
}
func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error {
	log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer)
	err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone())
	if err != nil {
		return err
	}

	err = flow.validateAndInsertPruningPoints(proofPruningPoint)
	if err != nil {
		return err
	}

	pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData()
	if err != nil {
		return err
	}

	if done {
		return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point")
	}

	if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point")
	}

	err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData)
	if err != nil {
		return err
	}

	for {
		blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
		if err != nil {
			return err
		}

		if done {
			break
		}

		err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData)
		if err != nil {
			return err
		}
	}

	log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
	return nil
}
func (flow *handleIBDFlow) processBlockWithTrustedData(
	consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedData) error {

	_, err := consensus.ValidateAndInsertBlockWithTrustedData(appmessage.BlockWithTrustedDataToDomainBlockWithTrustedData(block), false)
	return err
}

func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedData, bool, error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, false, err
	}

	switch downCastedMessage := message.(type) {
	case *appmessage.MsgBlockWithTrustedData:
		return downCastedMessage, false, nil
	case *appmessage.MsgDoneBlocksWithTrustedData:
		return nil, true, nil
	default:
		return nil, false,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s or %s, got: %s",
				(&appmessage.MsgBlockWithTrustedData{}).Command(),
				(&appmessage.MsgDoneBlocksWithTrustedData{}).Command(),
				downCastedMessage.Command())
	}
}

func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) {
	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return nil, err
	}

	msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints)
	if !ok {
		return nil,
			protocolerrors.Errorf(true, "received unexpected message type. "+
				"expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command())
	}

	return msgPruningPoints, nil
}
func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error {
	currentPruningPoint, err := flow.Domain().Consensus().PruningPoint()
	if err != nil {
		return err
	}

	if currentPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point")
	}

	pruningPoints, err := flow.receivePruningPoints()
	if err != nil {
		return err
	}

	headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers))
	for i, header := range pruningPoints.Headers {
		headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header)
	}

	arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers)
	if err != nil {
		return err
	}

	if arePruningPointsViolatingFinality {
		// TODO: Find a better way to deal with finality conflicts.
		return protocolerrors.Errorf(false, "pruning points are violating finality")
	}

	lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1])
	if !lastPruningPoint.Equal(proofPruningPoint) {
		return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+
			"point in the list")
	}

	err = flow.Domain().StagingConsensus().ImportPruningPoints(headers)
	if err != nil {
		return err
	}

	return nil
}
func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus,
	pruningPoint *externalapi.DomainHash) (bool, error) {

	log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint)
	isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint)
	if err != nil {
		return false, err
	}

	if !isValid {
		return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint)
	}

	log.Info("Fetching the pruning point UTXO set")
	isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
	if err != nil {
		return false, err
	}

	if !isSuccessful {
		log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
		return false, nil
	}

	log.Info("Fetched the new pruning point UTXO set")
	return true, nil
}

func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
	defer func() {
		err := flow.Domain().StagingConsensus().ClearImportedPruningPointData()
		if err != nil {
			panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
		}
	}()

	err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash))
	if err != nil {
		return false, err
	}

	receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash)
	if err != nil {
		return false, err
	}
	if !receivedAll {
		return false, nil
	}

	err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash)
	if err != nil {
		// TODO: Find a better way to deal with finality conflicts.
		if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
			return false, nil
		}
		return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
	}

	return true, nil
}
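// A note on fetchMissingUTXOSet's deferred cleanup above: the deferred
// ClearImportedPruningPointData call runs on every exit path, success or
// failure, so a half-imported UTXO set never lingers in the staging
// consensus. The panic inside the deferred function is the deliberate
// fallback: if even the cleanup fails, the store is in an unknown state and
// continuing to run would be unsafe.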
@@ -1,9 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log = logger.RegisterSubSystem("PROT")
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,35 +0,0 @@
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow.
type SendVirtualSelectedParentInvContext interface {
	Domain() domain.Domain
	Config() *config.Config
}

// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual
func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent()
	if err != nil {
		return err
	}

	if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) {
		log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer)
		return nil
	}

	log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer)

	virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent)
	return outgoingRoute.Enqueue(virtualSelectedParentInv)
}
@@ -1,187 +0,0 @@
package v3

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
	"github.com/kaspanet/kaspad/app/protocol/flows/v3/blockrelay"
	"github.com/kaspanet/kaspad/app/protocol/flows/v3/ping"
	"github.com/kaspanet/kaspad/app/protocol/flows/v3/rejects"
	"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

type protocolManager interface {
	RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand,
		isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
		messageTypes []appmessage.MessageCommand, isStopping *uint32,
		errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow
	Context() *flowcontext.FlowContext
}

// Register is used in order to register all the protocol flows to the given router.
func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) {
	flows = registerAddressFlows(m, router, isStopping, errChan)
	flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...)
	flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...)
	flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...)

	return flows
}
func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{},
			isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer)
			}),

		m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
			appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayInvs(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{
			appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
			appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedData,
			appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound,
			appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
			appmessage.CmdPruningPointProof,
		},
			isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBD(m.Context(), incomingRoute,
					outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleRequestBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestHeaders", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandleRequestPruningPointUTXOSet", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet,
				appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandleIBDBlockLocator", router,
			[]appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),

		m.RegisterFlow("HandlePruningPointProofRequests", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}
func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("ReceivePings", router, []appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute)
			},
		),

		m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer)
			},
		),
	}
}

func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
			[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
		m.RegisterFlow("HandleRequestTransactions", router,
			[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}

func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow {
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
		m.RegisterFlow("HandleRejects", router,
			[]appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
				return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute)
			},
		),
	}
}
@@ -13,7 +13,9 @@ import (
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
+	"github.com/kaspanet/kaspad/util/difficulty"
	"github.com/pkg/errors"
+	"math/big"
	"time"
)
@@ -64,6 +66,29 @@ func (flow *handleIBDFlow) start() error {
}

func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
+	highHash := consensushashing.BlockHash(block)
+
+	// Temp code to avoid IBD from lagging nodes publishing their side-chain
+	virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
+	if err == nil {
+		virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
+		if err == nil {
+			if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
+				virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
+				var virtualSub, difficultyMul big.Int
+				if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
+					Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
+					log.Criticalf("Avoiding IBD triggered by relay %s because it is coming from " +
+						"a deep (%d DAA score depth) side-chain which has much lower blue work (%d, %d)",
+						highHash,
+						virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
+						virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
+					return nil
+				}
+			}
+		}
+	}
+
	wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
	if !wasIBDNotRunning {
		log.Debugf("IBD is already running")
@@ -76,15 +101,14 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
		flow.logIBDFinished(isFinishedSuccessfully)
	}()

-	highHash := consensushashing.BlockHash(block)
-	log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
-	log.Debugf("Syncing blocks up to %s", highHash)
-	log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
+	log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
+	log.Criticalf("Syncing blocks up to %s", highHash)
+	log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
	highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
	if err != nil {
		return err
	}
-	log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
+	log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)

	shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
	if err != nil {
@@ -97,7 +121,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er

	if shouldDownloadHeadersProof {
		log.Infof("Starting IBD with headers proof")
-		err := flow.ibdWithHeadersProof(highHash)
+		err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
		if err != nil {
			return err
		}
@@ -115,7 +139,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
		}
	}

-	err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash)
+	err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
	if err != nil {
		return err
	}
@@ -266,7 +290,7 @@ func (flow *handleIBDFlow) fetchHighestHash(
}

func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
-	highHash *externalapi.DomainHash) error {
+	highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {

	log.Infof("Downloading headers from %s", flow.peer)
@@ -275,6 +299,12 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
		return err
	}

+	highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
+	if err != nil {
+		return err
+	}
+	progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
+
	// Keep a short queue of BlockHeadersMessages so that there's
	// never a moment when the node is not validating and inserting
	// headers
@@ -318,11 +348,14 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
				return nil
			}
			for _, header := range ibdBlocksMessage.BlockHeaders {
-				err = flow.processHeader(consensus, header)
+				_, err := flow.processHeader(consensus, header)
				if err != nil {
					return err
				}
			}
+
+			lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1]
+			progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore)
		case err := <-errChan:
			return err
		}
@@ -356,7 +389,7 @@ func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeader
	}
}

-func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
+func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
	header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
	block := &externalapi.DomainBlock{
		Header:       header,
@@ -366,27 +399,26 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
	blockHash := consensushashing.BlockHash(block)
	blockInfo, err := consensus.GetBlockInfo(blockHash)
	if err != nil {
-		return err
+		return false, err
	}
	if blockInfo.Exists {
		log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
-		return nil
+		return false, nil
	}
	_, err = consensus.ValidateAndInsertBlock(block, false)
	if err != nil {
		if !errors.As(err, &ruleerrors.RuleError{}) {
-			return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
+			return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
		}

		if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
			log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
		} else {
			log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
-			return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
+			return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
		}
	}

-	return nil
+	return true, nil
}

func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {
@@ -491,6 +523,17 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
		return nil
	}

+	lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0])
+	if err != nil {
+		return err
+	}
+	highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1])
+	if err != nil {
+		return err
+	}
+	progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks")
+	highestProcessedDAAScore := lowBlockHeader.DAAScore()
+
	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		var hashesToRequest []*externalapi.DomainHash
		if offset+ibdBatchSize < len(hashes) {
@@ -539,10 +582,14 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
			if err != nil {
				return err
			}
+
+			highestProcessedDAAScore = block.Header.DAAScore()
		}
+
+		progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore)
	}

-	return flow.resolveVirtual()
+	return flow.resolveVirtual(highestProcessedDAAScore)
}

func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error {
@@ -554,10 +601,25 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
	return nil
}

-func (flow *handleIBDFlow) resolveVirtual() error {
+func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
+	virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
+	if err != nil {
+		return err
+	}
+
	for i := 0; ; i++ {
		if i%10 == 0 {
-			log.Infof("Resolving virtual. This may take some time...")
+			virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
+			if err != nil {
+				return err
+			}
+			var percents int
+			if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
+				percents = 100
+			} else {
+				percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
+			}
+			log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
		}
		virtualChangeSet, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
		if err != nil {
32 app/protocol/flows/v4/blockrelay/ibd_progress_reporter.go Normal file
@@ -0,0 +1,32 @@
package blockrelay

type ibdProgressReporter struct {
	lowDAAScore                 uint64
	highDAAScore                uint64
	objectName                  string
	totalDAAScoreDifference     uint64
	lastReportedProgressPercent int
	processed                   int
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
	return &ibdProgressReporter{
		lowDAAScore:                 lowDAAScore,
		highDAAScore:                highDAAScore,
		objectName:                  objectName,
		totalDAAScoreDifference:     highDAAScore - lowDAAScore,
		lastReportedProgressPercent: 0,
		processed:                   0,
	}
}

func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
	ipr.processed += processedDelta

	relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
	progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
	if progressPercent > ipr.lastReportedProgressPercent {
		log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
		ipr.lastReportedProgressPercent = progressPercent
	}
}
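// Worked example for the reporter above: newIBDProgressReporter(1000, 2000,
// "block headers") gives totalDAAScoreDifference = 1000. After a batch whose
// highest processed DAA score is 1500, relativeDAAScore = 500 and
// progressPercent = int(500.0/1000.0*100) = 50, so "IBD: Processed N block
// headers (50%)" is logged exactly once; subsequent batches only log again
// when the integer percentage actually increases, which keeps the log output
// bounded to at most ~100 progress lines per sync.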
@@ -9,15 +9,16 @@ import (
	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/pkg/errors"
+	"time"
)

-func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash) error {
+func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
	err := flow.Domain().InitStagingConsensus()
	if err != nil {
		return err
	}

-	err = flow.downloadHeadersAndPruningUTXOSet(highHash)
+	err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
	if err != nil {
		if !flow.IsRecoverableError(err) {
			return err
@@ -87,7 +88,7 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
	if err != nil {
		return nil, err
	}
-	message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
+	message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute)
	if err != nil {
		return nil, err
	}
@@ -113,7 +114,7 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
	return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
}

-func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash) error {
+func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
	proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
	if err != nil {
		return err
@@ -130,7 +131,7 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalap
		return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
	}

-	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash)
+	err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
	if err != nil {
		return err
	}
@@ -4,11 +4,11 @@ import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/ping"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/rejects"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
	"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -39,13 +39,11 @@ func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStoppin
	outgoingRoute := router.OutgoingRoute()

	return []*common.Flow{
-		// TODO: This code was moved to the upper level to prevent a race condition when connecting to v3 peers. This should be uncommented
-		// and removed from the upper level once v3 is obsolete.
-		//m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
-		//	func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
-		//		return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
-		//	},
-		//),
+		m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan,
+			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
+				return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute)
+			},
+		),

		m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan,
			func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
@@ -1,11 +1,11 @@
package testing

import (
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
@@ -22,6 +22,7 @@ type TransactionsRelayContext interface {
	SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
	OnTransactionAddedToMempool()
	EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
+	IsIBDRunning() bool
}

type handleRelayedTransactionsFlow struct {
@@ -49,6 +50,10 @@ func (flow *handleRelayedTransactionsFlow) start() error {
			return err
		}

+		if flow.IsIBDRunning() {
+			continue
+		}
+
		requestedIDs, err := flow.requestInvTransactions(inv)
		if err != nil {
			return err
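// The IsIBDRunning gate added above drops transaction invs while the node is
// mid-IBD: the mempool is validated against the virtual, which is changing
// rapidly during sync, so relayed transactions would mostly fail validation
// and waste bandwidth. Note that the inv is still dequeued before the
// continue, so the incoming route keeps draining instead of backing up.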
@@ -3,10 +3,10 @@ package transactionrelay_test
import (
	"errors"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
	"strings"
	"testing"

-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
@@ -47,6 +47,10 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}

+func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
+	return false
+}
+
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {
@@ -2,10 +2,10 @@ package transactionrelay_test

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
+	"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
	"testing"

	"github.com/kaspanet/kaspad/app/appmessage"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/transactionrelay"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -3,8 +3,6 @@ package protocol
import (
	"github.com/kaspanet/kaspad/app/protocol/common"
	"github.com/kaspanet/kaspad/app/protocol/flows/ready"
-	v3 "github.com/kaspanet/kaspad/app/protocol/flows/v3"
-	"github.com/kaspanet/kaspad/app/protocol/flows/v3/addressexchange"
	v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
	"sync"
	"sync/atomic"
@@ -55,14 +53,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
		}
	})

-	// TODO: This code was moved here to prevent a race condition when connecting to v3 peers. This should be moved to v4.registerAddressFlows
-	// once v3 is obsolete.
-	sendAddressesFlow := m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, &isStopping, errChan,
-		func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
-			return addressexchange.SendAddresses(m.Context(), incomingRoute, router.OutgoingRoute())
-		},
-	)
-
	peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
		sendVersionRoute, router.OutgoingRoute())
@@ -86,21 +76,16 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
	var flows []*common.Flow
	log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
	switch peer.ProtocolVersion() {
-	case 3:
-		flows = v3.Register(m, router, errChan, &isStopping)
	case 4:
		flows = v4.Register(m, router, errChan, &isStopping)
	default:
		panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
	}
-	flows = append(flows, sendAddressesFlow)

-	if peer.ProtocolVersion() > 3 {
-		err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
-		if err != nil {
-			m.handleError(err, netConnection, router.OutgoingRoute())
-			return
-		}
+	err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer)
+	if err != nil {
+		m.handleError(err, netConnection, router.OutgoingRoute())
+		return
	}

	removeHandshakeRoutes(router)
@@ -38,6 +38,7 @@ var handlers = map[appmessage.MessageCommand]handler{
|
||||
appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged,
|
||||
appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged,
|
||||
appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses,
|
||||
appmessage.CmdGetBalancesByAddressesRequestMessage: rpchandlers.HandleGetBalancesByAddresses,
|
||||
appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore,
|
||||
appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged,
|
||||
appmessage.CmdBanRequestMessage: rpchandlers.HandleBan,
|
||||
|
||||
@@ -56,21 +56,29 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
|
||||
"invalid block")
|
||||
}
|
||||
|
||||
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
|
||||
_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block.VerboseData = &appmessage.RPCBlockVerboseData{
|
||||
Hash: blockHash.String(),
|
||||
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
|
||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
||||
BlueScore: blockInfo.BlueScore,
|
||||
Hash: blockHash.String(),
|
||||
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
|
||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
||||
BlueScore: blockInfo.BlueScore,
|
||||
MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
|
||||
MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds),
|
||||
IsChainBlock: isChainBlock,
|
||||
}
|
||||
// selectedParentHash will be nil in the genesis block
|
||||
if selectedParentHash != nil {
|
||||
block.VerboseData.SelectedParentHash = selectedParentHash.String()
|
||||
if blockInfo.SelectedParent != nil {
|
||||
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HandleGetBalanceByAddress handles the respectively named RPC command
|
||||
@@ -18,30 +19,39 @@ func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, re
|
||||
|
||||
getBalanceByAddressRequest := request.(*appmessage.GetBalanceByAddressRequestMessage)
|
||||
|
||||
var balance uint64 = 0
|
||||
addressString := getBalanceByAddressRequest.Address
|
||||
|
||||
address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
|
||||
balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address)
|
||||
if err != nil {
|
||||
rpcError := &appmessage.RPCError{}
|
||||
if !errors.As(err, rpcError) {
|
||||
return nil, err
|
||||
}
|
||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could decode address '%s': %s", addressString, err)
|
||||
errorMessage.Error = rpcError
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs {
|
||||
balance += utxoOutpointEntryPair.Amount()
|
||||
}
|
||||
|
||||
response := appmessage.NewGetBalanceByAddressResponse(balance)
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func getBalanceByAddress(context *rpccontext.Context, addressString string) (uint64, error) {
|
||||
address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
return 0, appmessage.RPCErrorf("Couldn't decode address '%s': %s", addressString, err)
|
||||
}
|
||||
|
||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
return 0, appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
|
||||
}
|
||||
utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
balance := uint64(0)
|
||||
for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs {
|
||||
balance += utxoOutpointEntryPair.Amount()
|
||||
}
|
||||
return balance, nil
|
||||
}
|
||||
|
||||
41
app/rpc/rpchandlers/get_balances_by_addresses.go
Normal file
@@ -0,0 +1,41 @@
package rpchandlers

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)

// HandleGetBalancesByAddresses handles the respectively named RPC command
func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if !context.Config.UTXOIndex {
errorMessage := &appmessage.GetBalancesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
return errorMessage, nil
}

getBalancesByAddressesRequest := request.(*appmessage.GetBalancesByAddressesRequestMessage)

allEntries := make([]*appmessage.BalancesByAddressesEntry, len(getBalancesByAddressesRequest.Addresses))
for i, address := range getBalancesByAddressesRequest.Addresses {
balance, err := getBalanceByAddress(context, address)

if err != nil {
rpcError := &appmessage.RPCError{}
if !errors.As(err, rpcError) {
return nil, err
}
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
errorMessage.Error = rpcError
return errorMessage, nil
}
allEntries[i] = &appmessage.BalancesByAddressesEntry{
Address: address,
Balance: balance,
}
}

response := appmessage.NewGetBalancesByAddressesResponse(allEntries)
return response, nil
}
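
A minimal client-side sketch of exercising the new RPC through kaspad's rpcclient wrapper; the address literal and error handling are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)

func main() {
	// "localhost:16110" is a hypothetical RPC address; adjust to your node.
	client, err := rpcclient.NewRPCClient("localhost:16110")
	if err != nil {
		panic(err)
	}

	// GetBalancesByAddresses is the client-side counterpart of the handler above.
	response, err := client.GetBalancesByAddresses([]string{
		"kaspa:qr...", // placeholder address, not a real one
	})
	if err != nil {
		panic(err)
	}
	for _, entry := range response.Entries {
		fmt.Printf("%s: %d sompi\n", entry.Address, entry.Balance)
	}
}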

@@ -1,3 +1,7 @@
Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)

Kaspad v0.11.10 - 2022-01-27
===========================
* Add monitoring of heap and save heap profile if size is over some limit (#1932)

@@ -3,6 +3,7 @@ package main
import (
"reflect"
"strconv"
"strings"

"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"

@@ -149,12 +150,24 @@ func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflec

value = pointer.Interface()

case reflect.Slice:
sliceType := parameterDesc.typeof.Elem()
if sliceType.Kind() != reflect.String {
return reflect.Value{},
errors.Errorf("Unsupported slice type '%s' for parameter '%s'",
sliceType,
parameterDesc.name)
}
if valueStr == "" {
value = []string{}
} else {
value = strings.Split(valueStr, ",")
}
// Int and uint are not supported because their size is platform-dependant
case reflect.Int,
reflect.Uint,
// Other types are not supported simply because they are not used in any command right now
// but support can be added if and when needed
reflect.Slice,
reflect.Func,
reflect.Interface,
reflect.Map,
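
The new reflect.Slice case gives kaspactl string-slice parameters a simple convention: one token, split on commas, with the empty string meaning an empty slice. A standalone sketch of the same rule (the helper name and sample addresses are illustrative only):

package main

import (
	"fmt"
	"strings"
)

// parseStringSlice mirrors the convention added above: an empty token
// yields an empty slice, anything else is split on commas.
func parseStringSlice(valueStr string) []string {
	if valueStr == "" {
		return []string{}
	}
	return strings.Split(valueStr, ",")
}

func main() {
	fmt.Println(parseStringSlice("kaspa:qqx...,kaspa:qqy...")) // two placeholder addresses
	fmt.Println(len(parseStringSlice("")))                     // 0
}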

@@ -12,11 +12,12 @@ var (
)

type configFlags struct {
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
RequestJSON string `short:"j" long:"json" description:"The request in JSON format"`
ListCommands bool `short:"l" long:"list-commands" description:"List all commands and exit"`
CommandAndParameters []string
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
RequestJSON string `short:"j" long:"json" description:"The request in JSON format"`
ListCommands bool `short:"l" long:"list-commands" description:"List all commands and exit"`
AllowConnectionToDifferentVersions bool `short:"a" long:"allow-connection-to-different-versions" description:"Allow connections to versions different than kaspactl's version'"`
CommandAndParameters []string
config.NetworkFlags
}

@@ -34,16 +34,18 @@ func main() {
}
defer client.Disconnect()

kaspadMessage, err := client.Post(&protowire.KaspadMessage{Payload: &protowire.KaspadMessage_GetInfoRequest{GetInfoRequest: &protowire.GetInfoRequestMessage{}}})
if err != nil {
printErrorAndExit(fmt.Sprintf("Cannot post GetInfo message: %s", err))
}
if !cfg.AllowConnectionToDifferentVersions {
kaspadMessage, err := client.Post(&protowire.KaspadMessage{Payload: &protowire.KaspadMessage_GetInfoRequest{GetInfoRequest: &protowire.GetInfoRequestMessage{}}})
if err != nil {
printErrorAndExit(fmt.Sprintf("Cannot post GetInfo message: %s", err))
}

localVersion := version.Version()
remoteVersion := kaspadMessage.GetGetInfoResponse().ServerVersion
localVersion := version.Version()
remoteVersion := kaspadMessage.GetGetInfoResponse().ServerVersion

if localVersion != remoteVersion {
printErrorAndExit(fmt.Sprintf("Server version mismatch, expect: %s, got: %s", localVersion, remoteVersion))
if localVersion != remoteVersion {
printErrorAndExit(fmt.Sprintf("Server version mismatch, expect: %s, got: %s", localVersion, remoteVersion))
}
}

responseChan := make(chan string)

@@ -15,6 +15,7 @@ const (
createUnsignedTransactionSubCmd = "create-unsigned-transaction"
signSubCmd = "sign"
broadcastSubCmd = "broadcast"
parseSubCmd = "parse"
showAddressesSubCmd = "show-addresses"
newAddressSubCmd = "new-address"
dumpUnencryptedDataSubCmd = "dump-unencrypted-data"

@@ -79,6 +80,13 @@ type broadcastConfig struct {
config.NetworkFlags
}

type parseConfig struct {
Transaction string `long:"transaction" short:"t" description:"The transaction to parse (encoded in hex)"`
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the transaction to parse (encoded in hex)"`
Verbose bool `long:"verbose" short:"v" description:"Verbose: show transaction inputs"`
config.NetworkFlags
}

type showAddressesConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
config.NetworkFlags

@@ -133,6 +141,10 @@ func parseCommandLine() (subCommand string, config interface{}) {
parser.AddCommand(broadcastSubCmd, "Broadcast the given transaction",
"Broadcast the given transaction", broadcastConf)

parseConf := &parseConfig{}
parser.AddCommand(parseSubCmd, "Parse the given transaction and print its contents",
"Parse the given transaction and print its contents", parseConf)

showAddressesConf := &showAddressesConfig{DaemonAddress: defaultListen}
parser.AddCommand(showAddressesSubCmd, "Shows all generated public addresses of the current wallet",
"Shows all generated public addresses of the current wallet", showAddressesConf)

@@ -207,6 +219,13 @@ func parseCommandLine() (subCommand string, config interface{}) {
printErrorAndExit(err)
}
config = broadcastConf
case parseSubCmd:
combineNetworkFlags(&parseConf.NetworkFlags, &cfg.NetworkFlags)
err := parseConf.ResolveNetwork(parser)
if err != nil {
printErrorAndExit(err)
}
config = parseConf
case showAddressesSubCmd:
combineNetworkFlags(&showAddressesConf.NetworkFlags, &cfg.NetworkFlags)
err := showAddressesConf.ResolveNetwork(parser)

@@ -2,6 +2,7 @@ package server

import (
"context"

"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
)

@@ -21,7 +22,7 @@ func (s *server) GetBalance(_ context.Context, _ *pb.GetBalanceRequest) (*pb.Get
maturity := s.params.BlockCoinbaseMaturity

balancesMap := make(balancesMapType, 0)
for _, entry := range s.utxos {
for _, entry := range s.utxosSortedByAmount {
amount := entry.UTXOEntry.Amount()
address := entry.address
balances, ok := balancesMap[address]

@@ -2,6 +2,7 @@ package server

import (
"context"

"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"

@@ -17,7 +18,7 @@ func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.Create
return nil, errors.New("server is not synced")
}

err := s.refreshExistingUTXOs()
err := s.refreshUTXOs()
if err != nil {
return nil, err
}

@@ -66,7 +67,7 @@ func (s *server) selectUTXOs(spendAmount uint64, feePerInput uint64) (
return nil, 0, err
}

for _, utxo := range s.utxos {
for _, utxo := range s.utxosSortedByAmount {
if !isUTXOSpendable(utxo, dagInfo.VirtualDAAScore, s.params.BlockCoinbaseMaturity) {
continue
}

@@ -2,15 +2,15 @@ package server

import (
"fmt"
"github.com/kaspanet/kaspad/util/profiling"
"net"
"os"
"sync"
"time"

"github.com/kaspanet/kaspad/util/profiling"

"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
"github.com/kaspanet/kaspad/infrastructure/os/signal"

@@ -26,11 +26,12 @@ type server struct {
rpcClient *rpcclient.RPCClient
params *dagconfig.Params

lock sync.RWMutex
utxos map[externalapi.DomainOutpoint]*walletUTXO
nextSyncStartIndex uint32
keysFile *keys.File
shutdown chan struct{}
lock sync.RWMutex
utxosSortedByAmount []*walletUTXO
nextSyncStartIndex uint32
keysFile *keys.File
shutdown chan struct{}
addressSet walletAddressSet
}

// Start starts the kaspawalletd server

@@ -61,12 +62,13 @@ func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath stri
}

serverInstance := &server{
rpcClient: rpcClient,
params: params,
utxos: make(map[externalapi.DomainOutpoint]*walletUTXO),
nextSyncStartIndex: 0,
keysFile: keysFile,
shutdown: make(chan struct{}),
rpcClient: rpcClient,
params: params,
utxosSortedByAmount: []*walletUTXO{},
nextSyncStartIndex: 0,
keysFile: keysFile,
shutdown: make(chan struct{}),
addressSet: make(walletAddressSet),
}

spawn("serverInstance.sync", func() {

@@ -1,12 +1,12 @@
package server

import (
"sort"
"time"

"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)

@@ -27,17 +27,18 @@ func (s *server) sync() error {
defer ticker.Stop()

for range ticker.C {
err := s.collectUTXOsFromRecentAddresses()
err := s.collectRecentAddresses()
if err != nil {
return err
}

err = s.collectUTXOsFromFarAddresses()
err = s.collectFarAddresses()
if err != nil {
return err
}

err = s.refreshExistingUTXOsWithLock()

if err != nil {
return err
}

@@ -74,12 +75,12 @@ func (s *server) addressesToQuery(start, end uint32) (walletAddressSet, error) {
return addresses, nil
}

// collectUTXOsFromFarAddresses collects numIndexesToQuery UTXOs
// collectFarAddresses collects numIndexesToQuery addresses
// from the last point it stopped in the previous call.
func (s *server) collectUTXOsFromFarAddresses() error {
func (s *server) collectFarAddresses() error {
s.lock.Lock()
defer s.lock.Unlock()
err := s.collectUTXOs(s.nextSyncStartIndex, s.nextSyncStartIndex+numIndexesToQuery)
err := s.collectAddresses(s.nextSyncStartIndex, s.nextSyncStartIndex+numIndexesToQuery)
if err != nil {
return err
}

@@ -100,14 +101,14 @@ func (s *server) maxUsedIndex() uint32 {
return maxUsedIndex
}

// collectUTXOsFromRecentAddresses collects UTXOs from used addresses until
// collectRecentAddresses collects addresses from used addresses until
// the address with the index of the last used address + 1000.
// collectUTXOsFromRecentAddresses scans addresses in batches of numIndexesToQuery,
// collectRecentAddresses scans addresses in batches of numIndexesToQuery,
// and releases the lock between scans.
func (s *server) collectUTXOsFromRecentAddresses() error {
func (s *server) collectRecentAddresses() error {
maxUsedIndex := s.maxUsedIndex()
for i := uint32(0); i < maxUsedIndex+1000; i += numIndexesToQuery {
err := s.collectUTXOsWithLock(i, i+numIndexesToQuery)
err := s.collectAddressesWithLock(i, i+numIndexesToQuery)
if err != nil {
return err
}

@@ -116,30 +117,25 @@ func (s *server) collectUTXOsFromRecentAddresses() error {
return nil
}

func (s *server) collectUTXOsWithLock(start, end uint32) error {
func (s *server) collectAddressesWithLock(start, end uint32) error {
s.lock.Lock()
defer s.lock.Unlock()

return s.collectUTXOs(start, end)
return s.collectAddresses(start, end)
}

func (s *server) collectUTXOs(start, end uint32) error {
func (s *server) collectAddresses(start, end uint32) error {
addressSet, err := s.addressesToQuery(start, end)
if err != nil {
return err
}

getUTXOsByAddressesResponse, err := s.rpcClient.GetUTXOsByAddresses(addressSet.strings())
getBalancesByAddressesResponse, err := s.rpcClient.GetBalancesByAddresses(addressSet.strings())
if err != nil {
return err
}

err = s.updateLastUsedIndexes(addressSet, getUTXOsByAddressesResponse)
if err != nil {
return err
}

err = s.updateUTXOs(addressSet, getUTXOsByAddressesResponse)
err = s.updateAddressesAndLastUsedIndexes(addressSet, getBalancesByAddressesResponse)
if err != nil {
return err
}

@@ -147,35 +143,28 @@ func (s *server) collectUTXOs(start, end uint32) error {
return nil
}

func (s *server) updateUTXOs(addressSet walletAddressSet,
getUTXOsByAddressesResponse *appmessage.GetUTXOsByAddressesResponseMessage) error {

for _, entry := range getUTXOsByAddressesResponse.Entries {
err := s.addEntryToUTXOSet(entry, addressSet)
if err != nil {
return err
}
}

return nil
}

func (s *server) updateLastUsedIndexes(addressSet walletAddressSet,
getUTXOsByAddressesResponse *appmessage.GetUTXOsByAddressesResponseMessage) error {
func (s *server) updateAddressesAndLastUsedIndexes(requestedAddressSet walletAddressSet,
getBalancesByAddressesResponse *appmessage.GetBalancesByAddressesResponseMessage) error {

lastUsedExternalIndex := s.keysFile.LastUsedExternalIndex()
lastUsedInternalIndex := s.keysFile.LastUsedInternalIndex()

for _, entry := range getUTXOsByAddressesResponse.Entries {
walletAddress, ok := addressSet[entry.Address]
for _, entry := range getBalancesByAddressesResponse.Entries {
walletAddress, ok := requestedAddressSet[entry.Address]
if !ok {
return errors.Errorf("Got result from address %s even though it wasn't requested", entry.Address)
}

if entry.Balance == 0 {
continue
}

if walletAddress.cosignerIndex != s.keysFile.CosignerIndex {
continue
}

s.addressSet[entry.Address] = walletAddress

if walletAddress.keyChain == libkaspawallet.ExternalKeychain {
if walletAddress.index > lastUsedExternalIndex {
lastUsedExternalIndex = walletAddress.index

@@ -200,58 +189,49 @@ func (s *server) refreshExistingUTXOsWithLock() error {
s.lock.Lock()
defer s.lock.Unlock()

return s.refreshExistingUTXOs()
return s.refreshUTXOs()
}

func (s *server) addEntryToUTXOSet(entry *appmessage.UTXOsByAddressesEntry, addressSet walletAddressSet) error {
outpoint, err := appmessage.RPCOutpointToDomainOutpoint(entry.Outpoint)
if err != nil {
return err
}
// updateUTXOSet clears the current UTXO set, and re-fills it with the given entries
func (s *server) updateUTXOSet(entries []*appmessage.UTXOsByAddressesEntry) error {
utxos := make([]*walletUTXO, len(entries))

utxoEntry, err := appmessage.RPCUTXOEntryToUTXOEntry(entry.UTXOEntry)
if err != nil {
return err
}

address, ok := addressSet[entry.Address]
if !ok {
return errors.Errorf("Got result from address %s even though it wasn't requested", entry.Address)
}

s.utxos[*outpoint] = &walletUTXO{
Outpoint: outpoint,
UTXOEntry: utxoEntry,
address: address,
}

return nil
}

func (s *server) refreshExistingUTXOs() error {
addressSet := make(walletAddressSet, len(s.utxos))
for _, utxo := range s.utxos {
addressString, err := s.walletAddressString(utxo.address)
for i, entry := range entries {
outpoint, err := appmessage.RPCOutpointToDomainOutpoint(entry.Outpoint)
if err != nil {
return err
}

addressSet[addressString] = utxo.address
utxoEntry, err := appmessage.RPCUTXOEntryToUTXOEntry(entry.UTXOEntry)
if err != nil {
return err
}

address, ok := s.addressSet[entry.Address]
if !ok {
return errors.Errorf("Got result from address %s even though it wasn't requested", entry.Address)
}
utxos[i] = &walletUTXO{
Outpoint: outpoint,
UTXOEntry: utxoEntry,
address: address,
}
}

getUTXOsByAddressesResponse, err := s.rpcClient.GetUTXOsByAddresses(addressSet.strings())
sort.Slice(utxos, func(i, j int) bool { return utxos[i].UTXOEntry.Amount() > utxos[j].UTXOEntry.Amount() })

s.utxosSortedByAmount = utxos

return nil
}

func (s *server) refreshUTXOs() error {
getUTXOsByAddressesResponse, err := s.rpcClient.GetUTXOsByAddresses(s.addressSet.strings())
if err != nil {
return err
}

s.utxos = make(map[externalapi.DomainOutpoint]*walletUTXO, len(getUTXOsByAddressesResponse.Entries))
for _, entry := range getUTXOsByAddressesResponse.Entries {
err := s.addEntryToUTXOSet(entry, addressSet)
if err != nil {
return err
}
}
return nil
return s.updateUTXOSet(getUTXOsByAddressesResponse.Entries)
}
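
Replacing the UTXO map with a slice sorted by descending amount lets selectUTXOs cover a spend with as few inputs as possible by scanning front to back. A self-contained sketch of that greedy pass; types and fees are simplified and the function is illustrative only:

package main

import (
	"fmt"
	"sort"
)

// pickUTXOs greedily selects amounts from a descending-sorted list until
// the target is covered. This mirrors why the wallet now keeps
// utxosSortedByAmount instead of an unordered map.
func pickUTXOs(amounts []uint64, target uint64) []uint64 {
	sort.Slice(amounts, func(i, j int) bool { return amounts[i] > amounts[j] })
	selected := []uint64{}
	total := uint64(0)
	for _, amount := range amounts {
		if total >= target {
			break
		}
		selected = append(selected, amount)
		total += amount
	}
	return selected
}

func main() {
	fmt.Println(pickUTXOs([]uint64{100, 2500, 700}, 3000)) // [2500 700]
}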

func (s *server) isSynced() bool {

@@ -19,6 +19,8 @@ func main() {
err = sign(config.(*signConfig))
case broadcastSubCmd:
err = broadcast(config.(*broadcastConfig))
case parseSubCmd:
err = parse(config.(*parseConfig))
case showAddressesSubCmd:
err = showAddresses(config.(*showAddressesConfig))
case newAddressSubCmd:

83
cmd/kaspawallet/parse.go
Normal file
@@ -0,0 +1,83 @@
package main

import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/pkg/errors"
"io/ioutil"
"strings"
)

func parse(conf *parseConfig) error {
if conf.Transaction == "" && conf.TransactionFile == "" {
return errors.Errorf("Either --transaction or --transaction-file is required")
}
if conf.Transaction != "" && conf.TransactionFile != "" {
return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time")
}

transactionHex := conf.Transaction
if conf.TransactionFile != "" {
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
if err != nil {
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
}
transactionHex = strings.TrimSpace(string(transactionHexBytes))
}

transaction, err := hex.DecodeString(transactionHex)
if err != nil {
return err
}

partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction)
if err != nil {
return err
}

fmt.Printf("Transaction ID: \t%s\n", consensushashing.TransactionID(partiallySignedTransaction.Tx))
fmt.Println()

allInputSompi := uint64(0)
for index, input := range partiallySignedTransaction.Tx.Inputs {
partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index]

if conf.Verbose {
fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Kaspa\n", index, input.PreviousOutpoint.TransactionID,
input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerKaspa))
}

allInputSompi += partiallySignedInput.PrevOutput.Value
}
if conf.Verbose {
fmt.Println()
}

allOutputSompi := uint64(0)
for index, output := range partiallySignedTransaction.Tx.Outputs {
scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams)
if err != nil {
return err
}

addressString := scriptPublicKeyAddress.EncodeAddress()
if scriptPublicKeyType == txscript.NonStandardTy {
scriptPublicKeyHex := hex.EncodeToString(output.ScriptPublicKey.Script)
addressString = fmt.Sprintf("<Non-standard transaction script public key: %s>", scriptPublicKeyHex)
}

fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Kaspa\n",
index, addressString, float64(output.Value)/float64(constants.SompiPerKaspa))

allOutputSompi += output.Value
}
fmt.Println()

fmt.Printf("Fee:\t%d Sompi\n", allInputSompi-allOutputSompi)

return nil
}
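
parse derives the fee implicitly: the sum of input values minus the sum of output values, printed in sompi, while per-row amounts are converted to KAS (1 KAS = 100,000,000 sompi per kaspad's constants.SompiPerKaspa). A tiny standalone sketch of that arithmetic, with made-up values:

package main

import "fmt"

const sompiPerKaspa = 100_000_000 // matches constants.SompiPerKaspa

func main() {
	inputs := []uint64{150_000_000, 50_000_000} // 1.5 KAS + 0.5 KAS
	outputs := []uint64{199_990_000}            // 1.9999 KAS

	allInputSompi, allOutputSompi := uint64(0), uint64(0)
	for _, v := range inputs {
		allInputSompi += v
	}
	for _, v := range outputs {
		allOutputSompi += v
	}

	// The fee is whatever value the outputs leave unclaimed.
	fmt.Printf("Fee:\t%d Sompi (%.8f Kaspa)\n",
		allInputSompi-allOutputSompi,
		float64(allInputSompi-allOutputSompi)/float64(sompiPerKaspa))
}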

@@ -144,14 +144,6 @@ func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, err
return s.pruningManager.PruningPointAndItsAnticone()
}

// TODO: Remove this method once v3 is obsolete
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()

return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
}

// BuildBlock builds a block over the current state, with the transactions
// selected by the given transactionSelector
func (s *consensus) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData,

@@ -190,7 +182,12 @@ func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction
return err
}

err = s.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash)
virtualPastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash)
if err != nil {
return err
}

err = s.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash, virtualPastMedianTime)
if err != nil {
return err
}

@@ -289,13 +286,15 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap

blockInfo.BlueScore = ghostdagData.BlueScore()
blockInfo.BlueWork = ghostdagData.BlueWork()
blockInfo.SelectedParent = ghostdagData.SelectedParent()
blockInfo.MergeSetBlues = ghostdagData.MergeSetBlues()
blockInfo.MergeSetReds = ghostdagData.MergeSetReds()

return blockInfo, nil
}

func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
parents []*externalapi.DomainHash, selectedParent *externalapi.DomainHash,
children []*externalapi.DomainHash, err error) {
parents []*externalapi.DomainHash, children []*externalapi.DomainHash, err error) {

s.lock.Lock()
defer s.lock.Unlock()

@@ -304,15 +303,10 @@ func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (

blockRelation, err := s.blockRelationStores[0].BlockRelation(s.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

blockGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, false)
if err != nil {
return nil, nil, nil, err
}

return blockRelation.Parents, blockGHOSTDAGData.SelectedParent(), blockRelation.Children, nil
return blockRelation.Parents, blockRelation.Children, nil
}
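
With this change GetBlockRelations stops reporting the selected parent, which callers now read from GetBlockInfo (nil for the genesis block). A hedged sketch of the updated call pattern; the helper is illustrative, not part of the change:

package example

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// describeBlock shows the split: topology from GetBlockRelations,
// GHOSTDAG-derived data (including the selected parent) from GetBlockInfo.
func describeBlock(consensusInstance externalapi.Consensus, blockHash *externalapi.DomainHash) error {
	parents, children, err := consensusInstance.GetBlockRelations(blockHash)
	if err != nil {
		return err
	}
	blockInfo, err := consensusInstance.GetBlockInfo(blockHash)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d parents, %d children, selected parent: %v\n",
		blockHash, len(parents), len(children), blockInfo.SelectedParent)
	return nil
}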

func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {

@@ -741,24 +735,27 @@ func (s *consensus) ValidatePruningPointProof(pruningPointProof *externalapi.Pru
s.lock.Lock()
defer s.lock.Unlock()

return s.pruningProofManager.ValidatePruningPointProof(pruningPointProof)
log.Infof("Validating the pruning point proof")
err := s.pruningProofManager.ValidatePruningPointProof(pruningPointProof)
if err != nil {
return err
}

log.Infof("Done validating the pruning point proof")
return nil
}

func (s *consensus) ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()
err := s.pruningProofManager.ApplyPruningPointProof(stagingArea, pruningPointProof)
if err != nil {
return err
}

err = staging.CommitAllChanges(s.databaseContext, stagingArea)
log.Infof("Applying the pruning point proof")
err := s.pruningProofManager.ApplyPruningPointProof(pruningPointProof)
if err != nil {
return err
}

log.Infof("Done applying the pruning point proof")
return nil
}

@@ -824,3 +821,16 @@ func (s *consensus) TrustedGHOSTDAGData(blockHash *externalapi.DomainHash) (*ext

return ghostdagData, nil
}

func (s *consensus) IsChainBlock(blockHash *externalapi.DomainHash) (bool, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()
virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false)
if err != nil {
return false, err
}

return s.dagTopologyManagers[0].IsInSelectedParentChainOf(stagingArea, blockHash, virtualGHOSTDAGData.SelectedParent())
}

@@ -0,0 +1,44 @@
package blockwindowheapslicestore

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type shardKey struct {
hash externalapi.DomainHash
windowSize int
}

type blockWindowHeapSliceStagingShard struct {
store *blockWindowHeapSliceStore
toAdd map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair
}

func (bss *blockWindowHeapSliceStore) stagingShard(stagingArea *model.StagingArea) *blockWindowHeapSliceStagingShard {
return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard {
return &blockWindowHeapSliceStagingShard{
store: bss,
toAdd: make(map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair),
}
}).(*blockWindowHeapSliceStagingShard)
}

func (bsss *blockWindowHeapSliceStagingShard) Commit(_ model.DBTransaction) error {
for key, heapSlice := range bsss.toAdd {
bsss.store.cache.Add(&key.hash, key.windowSize, heapSlice)
}

return nil
}

func (bsss *blockWindowHeapSliceStagingShard) isStaged() bool {
return len(bsss.toAdd) != 0
}

func newShardKey(hash *externalapi.DomainHash, windowSize int) shardKey {
return shardKey{
hash: *hash,
windowSize: windowSize,
}
}

@@ -0,0 +1,47 @@
package blockwindowheapslicestore

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)

type blockWindowHeapSliceStore struct {
shardID model.StagingShardID
cache *lrucachehashandwindowsizetoblockghostdagdatahashpairs.LRUCache
}

// New instantiates a new WindowHeapSliceStore
func New(cacheSize int, preallocate bool) model.WindowHeapSliceStore {
return &blockWindowHeapSliceStore{
shardID: staging.GenerateShardingID(),
cache: lrucachehashandwindowsizetoblockghostdagdatahashpairs.New(cacheSize, preallocate),
}
}

// Stage stages the given blockStatus for the given blockHash
func (bss *blockWindowHeapSliceStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int, heapSlice []*externalapi.BlockGHOSTDAGDataHashPair) {
stagingShard := bss.stagingShard(stagingArea)
stagingShard.toAdd[newShardKey(blockHash, windowSize)] = heapSlice
}

func (bss *blockWindowHeapSliceStore) IsStaged(stagingArea *model.StagingArea) bool {
return bss.stagingShard(stagingArea).isStaged()
}

func (bss *blockWindowHeapSliceStore) Get(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) {
stagingShard := bss.stagingShard(stagingArea)

if heapSlice, ok := stagingShard.toAdd[newShardKey(blockHash, windowSize)]; ok {
return heapSlice, nil
}

if heapSlice, ok := bss.cache.Get(blockHash, windowSize); ok {
return heapSlice, nil
}

return nil, errors.Wrap(database.ErrNotFound, "Window heap slice not found")
}
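
The store follows kaspad's staging pattern: Stage records the slice in a per-staging-area shard keyed by (hash, windowSize), Get consults the shard before the LRU cache, and the shard's Commit moves staged slices into the cache. A hedged usage sketch; the helper, the window size 2641, and the immediate read-back are illustrative only:

package example

import (
	"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockwindowheapslicestore"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// stageAndReadBack is an illustrative helper: the actual commit in kaspad is
// driven elsewhere, via staging.CommitAllChanges.
func stageAndReadBack(blockHash *externalapi.DomainHash,
	heapSlice []*externalapi.BlockGHOSTDAGDataHashPair) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) {

	store := blockwindowheapslicestore.New(2000, false)
	stagingArea := model.NewStagingArea()

	// Writes land in the staging shard, keyed by (hash, windowSize)...
	store.Stage(stagingArea, blockHash, 2641, heapSlice)

	// ...and reads within the same staging area see them before any commit.
	return store.Get(stagingArea, blockHash, 2641)
}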

@@ -1,12 +1,12 @@
package consensus

import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockwindowheapslicestore"
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
"os"
"sync"

@@ -145,9 +145,10 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches)
headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches)
daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
windowHeapSliceStore := blockwindowheapslicestore.New(2000, preallocateCaches)

blockRelationStores, reachabilityDataStores, ghostdagDataStores := dagStores(config, prefixBucket, pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches, preallocateCaches)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, windowHeapSliceStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)

blockRelationStore := blockRelationStores[0]
reachabilityDataStore := reachabilityDataStores[0]

@@ -158,7 +159,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]

// Processes
parentsManager := parentssanager.New(config.GenesisHash)
parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,

@@ -168,6 +169,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
pruningStore,

config.GenesisHash,
config.MaxBlockLevel,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,

@@ -304,6 +306,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
config.TimestampDeviationTolerance,
config.TargetTimePerBlock,
config.IgnoreHeaderMass,
config.MaxBlockLevel,

dbManager,
difficultyManager,

@@ -370,6 +373,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
blockProcessor := blockprocessor.New(
genesisHash,
config.TargetTimePerBlock,
config.MaxBlockLevel,
dbManager,
consensusStateManager,
pruningManager,

@@ -417,6 +421,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
genesisHash,
config.K,
config.PruningProofM,
config.MaxBlockLevel,
)

c := &consensus{

@@ -568,16 +573,16 @@ func dagStores(config *Config,
pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int,
preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) {

blockRelationStores := make([]model.BlockRelationStore, constants.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, constants.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, constants.MaxBlockLevel+1)
blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1)

ghostdagDataCacheSize := pruningWindowSizeForCaches * 2
if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
}

for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
prefixBucket := prefixBucket.Bucket([]byte{byte(i)})
if i == 0 {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)

@@ -597,6 +602,7 @@ func (f *factory) dagProcesses(config *Config,
dbManager model.DBManager,
blockHeaderStore model.BlockHeaderStore,
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
windowHeapSliceStore model.WindowHeapSliceStore,
blockRelationStores []model.BlockRelationStore,
reachabilityDataStores []model.ReachabilityDataStore,
ghostdagDataStores []model.GHOSTDAGDataStore) (

@@ -606,12 +612,12 @@ func (f *factory) dagProcesses(config *Config,
[]model.DAGTraversalManager,
) {

reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, constants.MaxBlockLevel+1)
reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1)

for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
reachabilityManagers[i] = reachabilitymanager.New(
dbManager,
ghostdagDataStores[i],

@@ -638,6 +644,7 @@ func (f *factory) dagProcesses(config *Config,
reachabilityDataStores[i],
ghostdagManagers[i],
daaWindowStore,
windowHeapSliceStore,
config.GenesisHash,
config.DifficultyAdjustmentWindowSize)
}

@@ -69,7 +69,7 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
BlockLevel(maxBlockLevel int) int
Equal(other BaseBlockHeader) bool
}

@@ -4,18 +4,24 @@ import "math/big"

// BlockInfo contains various information about a specific block
type BlockInfo struct {
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
SelectedParent *DomainHash
MergeSetBlues []*DomainHash
MergeSetReds []*DomainHash
}

// Clone returns a clone of BlockInfo
func (bi *BlockInfo) Clone() *BlockInfo {
return &BlockInfo{
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
SelectedParent: bi.SelectedParent,
MergeSetBlues: CloneHashes(bi.MergeSetBlues),
MergeSetReds: CloneHashes(bi.MergeSetReds),
}
}

@@ -14,31 +14,83 @@ func initTestBlockInfoStructsForClone() []*BlockInfo {
BlockStatus(0x01),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x02),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
1,
1,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
255,
2,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
0,
3,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x01),
0,
big.NewInt(1),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
false,
BlockStatus(0x01),
0,
big.NewInt(1),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}),
},
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}),
},
},
}
return tests

@@ -16,7 +16,7 @@ type Consensus interface {
GetBlockEvenIfHeaderOnly(blockHash *DomainHash) (*DomainBlock, error)
GetBlockHeader(blockHash *DomainHash) (BlockHeader, error)
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, selectedParent *DomainHash, children []*DomainHash, err error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, children []*DomainHash, err error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)

GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)

@@ -26,7 +26,6 @@ type Consensus interface {
PruningPoint() (*DomainHash, error)
PruningPointHeaders() ([]BlockHeader, error)
PruningPointAndItsAnticone() ([]*DomainHash, error)
BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
ClearImportedPruningPointData() error
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error

@@ -51,4 +50,5 @@ type Consensus interface {
TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *DomainHash, daaBlockWindowIndex uint64) (*TrustedDataDataDAAHeader, error)
TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *DomainHash) ([]*DomainHash, error)
TrustedGHOSTDAGData(blockHash *DomainHash) (*BlockGHOSTDAGData, error)
IsChainBlock(blockHash *DomainHash) (bool, error)
}

@@ -0,0 +1,11 @@
package model

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// WindowHeapSliceStore caches the slices that are needed for the heap implementation of DAGTraversalManager.BlockWindow
type WindowHeapSliceStore interface {
Store
Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int, pairs []*externalapi.BlockGHOSTDAGDataHashPair)
IsStaged(stagingArea *StagingArea) bool
Get(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error)
}

@@ -13,7 +13,6 @@ type DAGTraversalManager interface {
AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
AnticoneFromVirtualPOV(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
BlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.DomainHash, error)
BlockWindowWithGHOSTDAGData(stagingArea *StagingArea, highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error)
DAABlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
NewDownHeap(stagingArea *StagingArea) BlockHeap
NewUpHeap(stagingArea *StagingArea) BlockHeap

@@ -15,5 +15,4 @@ type PruningManager interface {
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
TrustedBlockAssociatedGHOSTDAGDataBlockHashes(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
}

@@ -6,5 +6,5 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type PruningProofManager interface {
BuildPruningPointProof(stagingArea *StagingArea) (*externalapi.PruningPointProof, error)
ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error
ApplyPruningPointProof(stagingArea *StagingArea, pruningPointProof *externalapi.PruningPointProof) error
ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error
}

@@ -9,7 +9,7 @@ import (
type TransactionValidator interface {
ValidateTransactionInIsolation(transaction *externalapi.DomainTransaction) error
ValidateTransactionInContextIgnoringUTXO(stagingArea *StagingArea, tx *externalapi.DomainTransaction,
povBlockHash *externalapi.DomainHash) error
povBlockHash *externalapi.DomainHash, povBlockPastMedianTime int64) error
ValidateTransactionInContextAndPopulateFee(stagingArea *StagingArea,
tx *externalapi.DomainTransaction, povBlockHash *externalapi.DomainHash) error
PopulateMass(transaction *externalapi.DomainTransaction)

@@ -166,7 +166,12 @@ func (bb *blockBuilder) validateTransaction(
return err
}

err = bb.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash)
virtualPastMedianTime, err := bb.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash)
if err != nil {
return err
}

err = bb.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash, virtualPastMedianTime)
if err != nil {
return err
}
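
The validator's new signature pushes the past-median-time lookup to the caller, so it is computed once per context rather than inside every validation. A hedged sketch of the two-step pattern as a standalone helper, assuming the model.PastMedianTimeManager and model.TransactionValidator interfaces as used above:

package example

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// validateAgainstVirtual is illustrative only: it wires the new
// povBlockPastMedianTime parameter the way the consensus and blockBuilder
// call sites above do.
func validateAgainstVirtual(pastMedianTimeManager model.PastMedianTimeManager,
	transactionValidator model.TransactionValidator,
	stagingArea *model.StagingArea, transaction *externalapi.DomainTransaction) error {

	// Step 1: resolve the virtual block's past median time once.
	virtualPastMedianTime, err := pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash)
	if err != nil {
		return err
	}

	// Step 2: hand it to the validator instead of letting it recompute.
	return transactionValidator.ValidateTransactionInContextIgnoringUTXO(
		stagingArea, transaction, model.VirtualBlockHash, virtualPastMedianTime)
}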
|
||||
|
||||
@@ -16,7 +16,8 @@ type blockParentBuilder struct {
|
||||
reachabilityDataStore model.ReachabilityDataStore
|
||||
pruningStore model.PruningStore
|
||||
|
||||
genesisHash *externalapi.DomainHash
|
||||
genesisHash *externalapi.DomainHash
|
||||
maxBlockLevel int
|
||||
}
|
||||
|
||||
// New creates a new instance of a BlockParentBuilder
|
||||
@@ -30,6 +31,7 @@ func New(
|
||||
pruningStore model.PruningStore,
|
||||
|
||||
genesisHash *externalapi.DomainHash,
|
||||
maxBlockLevel int,
|
||||
) model.BlockParentBuilder {
|
||||
return &blockParentBuilder{
|
||||
databaseContext: databaseContext,
|
||||
@@ -40,6 +42,7 @@ func New(
|
||||
reachabilityDataStore: reachabilityDataStore,
|
||||
pruningStore: pruningStore,
|
||||
genesisHash: genesisHash,
|
||||
maxBlockLevel: maxBlockLevel,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
|
||||
// all the block levels they occupy
|
||||
for _, directParentHeader := range directParentHeaders {
|
||||
directParentHash := consensushashing.HeaderHash(directParentHeader)
|
||||
blockLevel := directParentHeader.BlockLevel()
|
||||
blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel)
|
||||
for i := 0; i <= blockLevel; i++ {
|
||||
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
|
||||
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
type blockProcessor struct {
|
||||
genesisHash *externalapi.DomainHash
|
||||
targetTimePerBlock time.Duration
|
||||
maxBlockLevel int
|
||||
databaseContext model.DBManager
|
||||
blockLogger *blocklogger.BlockLogger
|
||||
|
||||
@@ -52,6 +53,7 @@ type blockProcessor struct {
|
||||
func New(
|
||||
genesisHash *externalapi.DomainHash,
|
||||
targetTimePerBlock time.Duration,
|
||||
maxBlockLevel int,
|
||||
databaseContext model.DBManager,
|
||||
|
||||
consensusStateManager model.ConsensusStateManager,
|
||||
@@ -86,6 +88,7 @@ func New(
|
||||
return &blockProcessor{
|
||||
genesisHash: genesisHash,
|
||||
targetTimePerBlock: targetTimePerBlock,
|
||||
maxBlockLevel: maxBlockLevel,
|
||||
databaseContext: databaseContext,
|
||||
blockLogger: blocklogger.NewBlockLogger(),
|
||||
pruningManager: pruningManager,
|
||||
|
||||
@@ -259,7 +259,7 @@ func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.Stagi
        return err
    }

    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel()
    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel(bp.maxBlockLevel)
    for blockLevel := 0; blockLevel <= headersSelectedTipHeaderBlockLevel; blockLevel++ {
        err := bp.reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, headersSelectedTip)
        if err != nil {
@@ -93,9 +93,44 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
    }

    for _, blockHash := range pruningPointAndItsAnticone {
        blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
        block, err := tcSyncer.GetBlock(blockHash)
        if err != nil {
            return
            t.Fatalf("GetBlock: %+v", err)
        }

        blockDAAWindowHashes, err := tcSyncer.BlockDAAWindowHashes(blockHash)
        if err != nil {
            t.Fatalf("BlockDAAWindowHashes: %+v", err)
        }

        ghostdagDataBlockHashes, err := tcSyncer.TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
        if err != nil {
            t.Fatalf("TrustedBlockAssociatedGHOSTDAGDataBlockHashes: %+v", err)
        }

        blockWithTrustedData := &externalapi.BlockWithTrustedData{
            Block:        block,
            DAAWindow:    make([]*externalapi.TrustedDataDataDAAHeader, 0, len(blockDAAWindowHashes)),
            GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(ghostdagDataBlockHashes)),
        }

        for i, daaBlockHash := range blockDAAWindowHashes {
            trustedDataDataDAAHeader, err := tcSyncer.TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
            if err != nil {
                t.Fatalf("TrustedDataDataDAAHeader: %+v", err)
            }
            blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, trustedDataDataDAAHeader)
        }

        for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
            data, err := tcSyncer.TrustedGHOSTDAGData(ghostdagDataBlockHash)
            if err != nil {
                t.Fatalf("TrustedGHOSTDAGData: %+v", err)
            }
            blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, &externalapi.BlockGHOSTDAGDataHashPair{
                Hash:         ghostdagDataBlockHash,
                GHOSTDAGData: data,
            })
        }

        _, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
@@ -136,8 +136,12 @@ func (v *blockValidator) checkBlockTransactions(
    }

    // Ensure all transactions in the block are finalized.
    pastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(stagingArea, blockHash)
    if err != nil {
        return err
    }
    for _, tx := range block.Transactions {
        if err = v.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, tx, blockHash); err != nil {
        if err = v.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, tx, blockHash, pastMedianTime); err != nil {
            return err
        }
    }
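This hunk and the blockBuilder hunk earlier apply the same refactor: the POV block's past median time is computed once per block and passed into ValidateTransactionInContextIgnoringUTXO, instead of being re-derived inside the validator for every transaction. A toy, self-contained sketch of the pattern (all names below are stand-ins, not the kaspad API):

package main

import "fmt"

// pastMedianTime stands in for the expensive per-block computation.
func pastMedianTime(block string) int64 { return 1636975000000 }

// validateTx stands in for the validator, which now receives the
// precomputed value instead of looking it up again per transaction.
func validateTx(tx string, povPastMedianTime int64) error {
    _ = povPastMedianTime
    return nil
}

func main() {
    txs := []string{"tx1", "tx2", "tx3"}
    pmt := pastMedianTime("block") // computed once per block
    for _, tx := range txs {
        if err := validateTx(tx, pmt); err != nil {
            panic(err)
        }
    }
    fmt.Println("validated", len(txs), "transactions with one past-median-time lookup")
}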
@@ -62,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
        return err
    }
    if !hasReachabilityData {
        blockLevel := header.BlockLevel()
        blockLevel := header.BlockLevel(v.maxBlockLevel)
        for i := 0; i <= blockLevel; i++ {
            err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
            if err != nil {
@@ -23,6 +23,7 @@ type blockValidator struct {
    timestampDeviationTolerance int
    targetTimePerBlock          time.Duration
    ignoreHeaderMass            bool
    maxBlockLevel               int

    databaseContext   model.DBReader
    difficultyManager model.DifficultyManager
@@ -60,6 +61,7 @@ func New(powMax *big.Int,
    timestampDeviationTolerance int,
    targetTimePerBlock time.Duration,
    ignoreHeaderMass bool,
    maxBlockLevel int,

    databaseContext model.DBReader,

@@ -97,6 +99,7 @@ func New(powMax *big.Int,
        mergeSetSizeLimit: mergeSetSizeLimit,
        maxBlockParents:   maxBlockParents,
        ignoreHeaderMass:  ignoreHeaderMass,
        maxBlockLevel:     maxBlockLevel,

        timestampDeviationTolerance: timestampDeviationTolerance,
        targetTimePerBlock:          targetTimePerBlock,
@@ -69,7 +69,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
    header externalapi.BlockHeader,
    isBlockWithTrustedData bool) error {

    for level := 0; level <= header.BlockLevel(); level++ {
    for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ {
        var parents []*externalapi.DomainHash
        for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
            _, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
@@ -118,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
        return err
    }

    blockLevel := header.BlockLevel()
    blockLevel := header.BlockLevel(v.maxBlockLevel)
    for i := 1; i <= blockLevel; i++ {
        err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
        if err != nil {
@@ -336,12 +336,12 @@ func (csm *consensusStateManager) boundedMergeBreakingParents(stagingArea *model
    log.Debugf("Checking whether parent %s breaks the bounded merge set", parent)
    isBadRedInPast := false
    for _, badRedBlock := range badReds {
        isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, parent, badRedBlock)
        isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, badRedBlock, parent)
        if err != nil {
            return nil, err
        }
        if isBadRedInPast {
            log.Debugf("Parent %s is an ancestor of bad red %s", parent, badRedBlock)
            log.Debugf("Parent %s is a descendant of bad red %s", parent, badRedBlock)
            break
        }
    }
@@ -152,6 +152,18 @@ func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, c
    return &h
}

func (dtm *dagTraversalManager) newSizedUpHeapFromSlice(stagingArea *model.StagingArea, slice []*externalapi.BlockGHOSTDAGDataHashPair) *sizedUpBlockHeap {
    sliceClone := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(slice), cap(slice))
    copy(sliceClone, slice)
    h := sizedUpBlockHeap{
        impl:          upHeap{baseHeap{slice: sliceClone, ghostdagManager: dtm.ghostdagManager}},
        ghostdagStore: dtm.ghostdagDataStore,
        dbContext:     dtm.databaseContext,
        stagingArea:   stagingArea,
    }
    return &h
}

// len returns the length of this heap
func (sbh *sizedUpBlockHeap) len() int {
    return sbh.impl.Len()
@@ -18,6 +18,7 @@ type dagTraversalManager struct {
    daaWindowStore                 model.BlocksWithTrustedDataDAAWindowStore
    genesisHash                    *externalapi.DomainHash
    difficultyAdjustmentWindowSize int
    windowHeapSliceStore           model.WindowHeapSliceStore
}

// New instantiates a new DAGTraversalManager
@@ -28,6 +29,7 @@ func New(
    reachabilityDataStore model.ReachabilityDataStore,
    ghostdagManager model.GHOSTDAGManager,
    daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
    windowHeapSliceStore model.WindowHeapSliceStore,
    genesisHash *externalapi.DomainHash,
    difficultyAdjustmentWindowSize int) model.DAGTraversalManager {
    return &dagTraversalManager{
@@ -40,6 +42,7 @@ func New(

        genesisHash:                    genesisHash,
        difficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
        windowHeapSliceStore:           windowHeapSliceStore,
    }
}
@@ -16,7 +16,7 @@ func (dtm *dagTraversalManager) DAABlockWindow(stagingArea *model.StagingArea, h
func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, highHash *externalapi.DomainHash,
    windowSize int) ([]*externalapi.DomainHash, error) {

    windowHeap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
    windowHeap, err := dtm.blockWindowHeap(stagingArea, highHash, windowSize)
    if err != nil {
        return nil, err
    }
@@ -28,15 +28,26 @@ func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, high
    return window, nil
}

func (dtm *dagTraversalManager) BlockWindowWithGHOSTDAGData(stagingArea *model.StagingArea,
    highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) {
func (dtm *dagTraversalManager) blockWindowHeap(stagingArea *model.StagingArea,
    highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {
    windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, highHash, windowSize)
    sliceNotCached := database.IsNotFoundError(err)
    if !sliceNotCached && err != nil {
        return nil, err
    }
    if !sliceNotCached {
        return dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice), nil
    }

    windowHeap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
    heap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
    if err != nil {
        return nil, err
    }

    return windowHeap.impl.slice, nil
    if !highHash.Equal(model.VirtualBlockHash) {
        dtm.windowHeapSliceStore.Stage(stagingArea, highHash, windowSize, heap.impl.slice)
    }
    return heap, nil
}

func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.StagingArea,
@@ -56,18 +67,54 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
        return nil, err
    }

    // If the block has a trusted DAA window attached, we just take it as is and don't use the cache of the selected
    // parent to build the window. This is because tryPushMergeSet might not be able to find all the GHOSTDAG data that is
    // associated with the block merge set.
    _, err = dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
    isNonTrustedBlock := database.IsNotFoundError(err)
    if !isNonTrustedBlock && err != nil {
        return nil, err
    }

    if isNonTrustedBlock && currentGHOSTDAGData.SelectedParent() != nil {
        windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, currentGHOSTDAGData.SelectedParent(), windowSize)
        selectedParentNotCached := database.IsNotFoundError(err)
        if !selectedParentNotCached && err != nil {
            return nil, err
        }
        if !selectedParentNotCached {
            windowHeap := dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice)
            if !currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
                selectedParentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(
                    dtm.databaseContext, stagingArea, currentGHOSTDAGData.SelectedParent(), false)
                if err != nil {
                    return nil, err
                }

                _, err = dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
                if err != nil {
                    return nil, err
                }
            }

            return windowHeap, nil
        }
    }

    // Walk down the chain until we either finish or find a trusted block, and then complete the rest
    // of the window with the trusted window.
    for {
        if currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
            break
        }

        _, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
        isNotFoundError := database.IsNotFoundError(err)
        if !isNotFoundError && err != nil {
        currentIsNonTrustedBlock := database.IsNotFoundError(err)
        if !currentIsNonTrustedBlock && err != nil {
            return nil, err
        }

        if !isNotFoundError {
        if !currentIsNonTrustedBlock {
            for i := uint64(0); ; i++ {
                daaBlock, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, i)
                if database.IsNotFoundError(err) {
@@ -94,47 +141,60 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
            if err != nil {
                return nil, err
            }
        added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)

        done, err := dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
        if err != nil {
            return nil, err
        }

        // If the window is full and the selected parent is less than the minimum then we break
        // because this means that there cannot be any more blocks in the past with higher blueWork
        if !added {
        if done {
            break
        }

        // Now we go over the merge set.
        // Remove the SP from the blue merge set because we already added it.
        mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
        // Go over the merge set in reverse because it's ordered in reverse by blueWork.
        for i := len(mergeSetBlues) - 1; i >= 0; i-- {
            added, err := windowHeap.tryPush(mergeSetBlues[i])
            if err != nil {
                return nil, err
            }
            // If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
            if !added {
                break
            }
        }

        mergeSetReds := currentGHOSTDAGData.MergeSetReds()
        for i := len(mergeSetReds) - 1; i >= 0; i-- {
            added, err := windowHeap.tryPush(mergeSetReds[i])
            if err != nil {
                return nil, err
            }
            // If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
            if !added {
                break
            }
        }

        current = currentGHOSTDAGData.SelectedParent()
        currentGHOSTDAGData = selectedParentGHOSTDAGData
    }

    return windowHeap, nil
}

func (dtm *dagTraversalManager) tryPushMergeSet(windowHeap *sizedUpBlockHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData *externalapi.BlockGHOSTDAGData) (bool, error) {
    added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
    if err != nil {
        return false, err
    }

    // If the window is full and the selected parent is less than the minimum then we break
    // because this means that there cannot be any more blocks in the past with higher blueWork
    if !added {
        return true, nil
    }

    // Now we go over the merge set.
    // Remove the SP from the blue merge set because we already added it.
    mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
    // Go over the merge set in reverse because it's ordered in reverse by blueWork.
    for i := len(mergeSetBlues) - 1; i >= 0; i-- {
        added, err := windowHeap.tryPush(mergeSetBlues[i])
        if err != nil {
            return false, err
        }
        // If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
        if !added {
            break
        }
    }

    mergeSetReds := currentGHOSTDAGData.MergeSetReds()
    for i := len(mergeSetReds) - 1; i >= 0; i-- {
        added, err := windowHeap.tryPush(mergeSetReds[i])
        if err != nil {
            return false, err
        }
        // If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
        if !added {
            break
        }
    }

    return false, nil
}
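Taken together, blockWindowHeap and tryPushMergeSet change how a block's DAA window is built: rather than walking the whole selected chain, the cached window slice of the selected parent is reused and only the new block's merge set is pushed into the heap. A toy, self-contained sketch of that incremental idea (string "hashes" and a simplified eviction rule stand in for the real sized-up heap):

package main

import "fmt"

// windowFromParent builds a block's window from its selected parent's cached
// window plus the block's merge set, instead of re-walking the chain.
func windowFromParent(parentWindow, mergeSet []string, size int) []string {
    window := append(append([]string{}, parentWindow...), mergeSet...)
    if len(window) > size {
        // Toy eviction: keep the newest entries. The real code evicts the
        // lowest blue-work entries via a sized-up heap.
        window = window[len(window)-size:]
    }
    return window
}

func main() {
    parentWindow := []string{"a", "b", "c"} // cached for the selected parent
    fmt.Println(windowFromParent(parentWindow, []string{"d", "e"}, 4)) // [b c d e]
}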
@@ -1,18 +1,18 @@
package difficultymanager

import (
    "math"
    "math/big"

    "github.com/kaspanet/kaspad/domain/consensus/model"

    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/util/difficulty"
    "math"
    "math/big"
)

type difficultyBlock struct {
    timeInMilliseconds int64
    Bits               uint32
    hash               *externalapi.DomainHash
    blueWork           *big.Int
}

type blockWindow []difficultyBlock
@@ -27,6 +27,8 @@ func (dm *difficultyManager) getDifficultyBlock(
    return difficultyBlock{
        timeInMilliseconds: header.TimeInMilliseconds(),
        Bits:               header.Bits(),
        hash:               blockHash,
        blueWork:           header.BlueWork(),
    }, nil
}
@@ -53,19 +55,32 @@ func (dm *difficultyManager) blockWindow(stagingArea *model.StagingArea, startin
    return window, windowHashes, nil
}

func (window blockWindow) minMaxTimestamps() (min, max int64, minIndex, maxIndex int) {
func ghostdagLess(blockA *difficultyBlock, blockB *difficultyBlock) bool {
    switch blockA.blueWork.Cmp(blockB.blueWork) {
    case -1:
        return true
    case 1:
        return false
    case 0:
        return blockA.hash.Less(blockB.hash)
    default:
        panic("big.Int.Cmp is defined to always return -1/1/0 and nothing else")
    }
}

func (window blockWindow) minMaxTimestamps() (min, max int64, minIndex int) {
    min = math.MaxInt64
    minIndex = math.MaxInt64
    minIndex = 0
    max = 0
    maxIndex = 0
    for i, block := range window {
        if block.timeInMilliseconds < min {
        // If timestamps are equal we ghostdag compare in order to reach consensus on `minIndex`
        if block.timeInMilliseconds < min ||
            (block.timeInMilliseconds == min && ghostdagLess(&block, &window[minIndex])) {
            min = block.timeInMilliseconds
            minIndex = i
        }
        if block.timeInMilliseconds > max {
            max = block.timeInMilliseconds
            maxIndex = i
        }
    }
    return
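The key property of ghostdagLess is that it breaks timestamp ties deterministically: blue work decides first, then the hash, so every node removes the same block from the difficulty window. A self-contained sketch of the rule (blk and the string hashes are stand-ins for the real types):

package main

import (
    "fmt"
    "math/big"
)

type blk struct {
    time     int64
    blueWork *big.Int
    hash     string
}

// less mirrors the tie-break order above: blue work first, then hash.
func less(a, b blk) bool {
    switch a.blueWork.Cmp(b.blueWork) {
    case -1:
        return true
    case 1:
        return false
    default:
        return a.hash < b.hash
    }
}

func main() {
    a := blk{time: 1000, blueWork: big.NewInt(7), hash: "aa"}
    b := blk{time: 1000, blueWork: big.NewInt(7), hash: "ab"}
    // Equal timestamps and equal blue work: the hash decides, deterministically.
    fmt.Println(less(a, b)) // true
}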
@@ -115,9 +115,10 @@ func (dm *difficultyManager) requiredDifficultyFromTargetsWindow(targetsWindow b
    if len(targetsWindow) < 2 || len(targetsWindow) < dm.difficultyAdjustmentWindowSize {
        return dm.genesisBits, nil
    }
    windowMinTimestamp, windowMaxTimeStamp, windowsMinIndex, _ := targetsWindow.minMaxTimestamps()

    windowMinTimestamp, windowMaxTimeStamp, windowMinIndex := targetsWindow.minMaxTimestamps()
    // Remove the last block from the window so as to calculate the average target of dag.difficultyAdjustmentWindowSize blocks
    targetsWindow.remove(windowsMinIndex)
    targetsWindow.remove(windowMinIndex)

    // Calculate the new target difficulty as:
    // averageWindowTarget * (windowMinTimestamp / (targetTimePerBlock * windowSize))

@@ -35,7 +35,7 @@ func (dm *difficultyManager) estimateNetworkHashesPerSecond(stagingArea *model.S
        return 0, nil
    }

    minWindowTimestamp, maxWindowTimestamp, _, _ := blockWindow.minMaxTimestamps()
    minWindowTimestamp, maxWindowTimestamp, _ := blockWindow.minMaxTimestamps()
    if minWindowTimestamp == maxWindowTimestamp {
        return 0, errors.Errorf("min window timestamp is equal to the max window timestamp")
    }
@@ -3,17 +3,18 @@ package parentssanager

import (
    "github.com/kaspanet/kaspad/domain/consensus/model"
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)

type parentsManager struct {
    genesisHash *externalapi.DomainHash
    genesisHash   *externalapi.DomainHash
    maxBlockLevel int
}

// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager {
    return &parentsManager{
        genesisHash: genesisHash,
        genesisHash:   genesisHash,
        maxBlockLevel: maxBlockLevel,
    }
}

@@ -31,7 +32,7 @@ func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, le
}

func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
    numParents := constants.MaxBlockLevel + 1
    numParents := pm.maxBlockLevel + 1
    parents := make([]externalapi.BlockLevelParents, numParents)
    for i := 0; i < numParents; i++ {
        parents[i] = pm.ParentsAtLevel(blockHeader, i)
@@ -38,8 +38,8 @@ func TestPruning(t *testing.T) {
    "dag-for-test-pruning.json": {
        dagconfig.MainnetParams.Name: "503",
        dagconfig.TestnetParams.Name: "502",
        dagconfig.DevnetParams.Name:  "503",
        dagconfig.SimnetParams.Name:  "502",
        dagconfig.DevnetParams.Name:  "502",
        dagconfig.SimnetParams.Name:  "503",
    },
}
@@ -962,71 +962,6 @@ func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHas
    return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
}

func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
    block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
    if err != nil {
        return nil, err
    }

    windowSize := pm.difficultyAdjustmentWindowSize
    window, err := pm.dagTraversalManager.BlockWindowWithGHOSTDAGData(stagingArea, blockHash, windowSize)
    if err != nil {
        return nil, err
    }

    windowPairs := make([]*externalapi.TrustedDataDataDAAHeader, len(window))
    for i, daaBlock := range window {
        daaDomainBlock, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, daaBlock.Hash)
        if err != nil {
            return nil, err
        }

        windowPairs[i] = &externalapi.TrustedDataDataDAAHeader{
            Header:       daaDomainBlock,
            GHOSTDAGData: daaBlock.GHOSTDAGData,
        }
    }

    ghostdagDataHashPairs := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, pm.k)
    current := blockHash
    isTrustedData := false
    for i := externalapi.KType(0); i < pm.k+1; i++ {
        ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, current, isTrustedData)
        isNotFoundError := database.IsNotFoundError(err)
        if !isNotFoundError && err != nil {
            return nil, err
        }
        if isNotFoundError || ghostdagData.SelectedParent().Equal(model.VirtualGenesisBlockHash) {
            isTrustedData = true
            ghostdagData, err = pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, current, true)
            if err != nil {
                return nil, err
            }
        }

        ghostdagDataHashPairs = append(ghostdagDataHashPairs, &externalapi.BlockGHOSTDAGDataHashPair{
            Hash:         current,
            GHOSTDAGData: ghostdagData,
        })

        if ghostdagData.SelectedParent().Equal(pm.genesisHash) {
            break
        }

        if current.Equal(pm.genesisHash) {
            break
        }

        current = ghostdagData.SelectedParent()
    }

    return &externalapi.BlockWithTrustedData{
        Block:        block,
        DAAWindow:    windowPairs,
        GHOSTDAGData: ghostdagDataHashPairs,
    }, nil
}

func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
    ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
    if err != nil {
@@ -1060,7 +995,13 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
        return nil, err
    }

    if hasPruningPointInItsSelectedChain && pm.finalityScore(ghostdagData.BlueScore()) > pm.finalityScore(selectedParentPruningPointHeader.BlueScore()+pm.pruningDepth) {
    // Note: the pruning point from the POV of the current block is the first block in its chain that is at depth pm.pruningDepth and
    // whose finality score is greater than that of the previous pruning point. This is why, if the diff between (finalityScore(selectedParentPruningPoint.blueScore) + 1) * finalityInterval
    // and the current block's blue score is less than pm.pruningDepth, we can know for sure that this block didn't trigger a pruning point change.
    minRequiredBlueScoreForNextPruningPoint := (pm.finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * pm.finalityInterval

    if hasPruningPointInItsSelectedChain &&
        minRequiredBlueScoreForNextPruningPoint+pm.pruningDepth <= ghostdagData.BlueScore() {
        var suggestedLowHash *externalapi.DomainHash
        hasReachabilityData, err := pm.reachabilityDataStore.HasReachabilityData(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
        if err != nil {
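A worked example of the rewritten condition, with made-up constants (the real finalityInterval and pruningDepth are network parameters):

package main

import "fmt"

// finalityScore(b) = b / finalityInterval, so the next pruning point must have
// blueScore >= (finalityScore(prevPruningPoint.blueScore) + 1) * finalityInterval,
// and it must also sit pruningDepth below the current block.
func main() {
    const finalityInterval = 100 // illustrative value only
    const pruningDepth = 185     // illustrative value only
    prevPruningPointBlueScore := uint64(730)
    blockBlueScore := uint64(1100)

    finalityScore := func(blueScore uint64) uint64 { return blueScore / finalityInterval }

    minRequired := (finalityScore(prevPruningPointBlueScore) + 1) * finalityInterval // (7+1)*100 = 800
    // Only if minRequired+pruningDepth <= blockBlueScore can this block have
    // moved the pruning point; otherwise the cheap check rules it out.
    fmt.Println(minRequired+pruningDepth <= blockBlueScore) // 985 <= 1100 -> true
}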
@@ -13,10 +13,10 @@ import (
    "github.com/kaspanet/kaspad/domain/consensus/processes/reachabilitymanager"
    "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
    "github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
    "github.com/kaspanet/kaspad/infrastructure/db/database"
    "github.com/kaspanet/kaspad/infrastructure/logger"
    "github.com/kaspanet/kaspad/util/staging"
    "github.com/pkg/errors"
    "math/big"
)
@@ -40,6 +40,7 @@ type pruningProofManager struct {
    genesisHash   *externalapi.DomainHash
    k             externalapi.KType
    pruningProofM uint64
    maxBlockLevel int

    cachedPruningPoint *externalapi.DomainHash
    cachedProof        *externalapi.PruningPointProof
@@ -65,6 +66,7 @@ func New(
    genesisHash *externalapi.DomainHash,
    k externalapi.KType,
    pruningProofM uint64,
    maxBlockLevel int,
) model.PruningProofManager {

    return &pruningProofManager{
@@ -85,6 +87,7 @@ func New(
        genesisHash:   genesisHash,
        k:             k,
        pruningProofM: pruningProofM,
        maxBlockLevel: maxBlockLevel,
    }
}
@@ -133,7 +136,7 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
    maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
    headersByLevel := make(map[int][]externalapi.BlockHeader)
    selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
    pruningPointLevel := pruningPointHeader.BlockLevel()
    pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
    for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
        var selectedTip *externalapi.DomainHash
        if blockLevel <= pruningPointLevel {
@@ -309,7 +312,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
    level0Headers := pruningPointProof.Headers[0]
    pruningPointHeader := level0Headers[len(level0Headers)-1]
    pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
    pruningPointBlockLevel := pruningPointHeader.BlockLevel()
    pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
    maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
    if maxLevel >= len(pruningPointProof.Headers) {
        return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
@@ -346,15 +349,16 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext

    selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
    for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
        log.Infof("Validating level %d from the pruning point proof", blockLevel)
        headers := make([]externalapi.BlockHeader, len(pruningPointProof.Headers[blockLevel]))
        copy(headers, pruningPointProof.Headers[blockLevel])

        var selectedTip *externalapi.DomainHash
        for i, header := range headers {
            blockHash := consensushashing.HeaderHash(header)
            if header.BlockLevel() < blockLevel {
            if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
                return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
                    "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
                    "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
            }

            blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -579,9 +583,9 @@ func (ppm *pruningProofManager) dagProcesses(
    []model.GHOSTDAGManager,
) {

    reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
    dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
    ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
    reachabilityManagers := make([]model.ReachabilityManager, ppm.maxBlockLevel+1)
    dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1)
    ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1)

    for i := 0; i <= maxLevel; i++ {
        reachabilityManagers[i] = reachabilitymanager.New(
@@ -607,17 +611,27 @@ func (ppm *pruningProofManager) dagProcesses(
    return reachabilityManagers, dagTopologyManagers, ghostdagManagers
}

func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
// ApplyPruningPointProof applies the given pruning proof to the current consensus. Specifically,
// it's meant to be used against the StagingConsensus during headers-proof IBD. Note that for
// performance reasons this operation is NOT atomic. If the process fails for whatever reason
// (e.g. the process was killed) then the database for this consensus MUST be discarded.
func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
    onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
    defer onEnd()

    for blockLevel, headers := range pruningPointProof.Headers {
        log.Infof("Applying level %d from the pruning point proof", blockLevel)
        var selectedTip *externalapi.DomainHash
        for i, header := range headers {
            if i%1000 == 0 {
                log.Infof("Applying level %d from the pruning point proof - applied %d headers out of %d", blockLevel, i, len(headers))
            }
            stagingArea := model.NewStagingArea()

            blockHash := consensushashing.HeaderHash(header)
            if header.BlockLevel() < blockLevel {
            if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
                return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
                    "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
                    "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
            }

            ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -693,11 +707,18 @@ func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.Stagin
                return err
            }
        }

            err = staging.CommitAllChanges(ppm.databaseContext, stagingArea)
            if err != nil {
                return err
            }
        }
    }

    pruningPointHeader := pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]
    pruningPoint := consensushashing.HeaderHash(pruningPointHeader)

    stagingArea := model.NewStagingArea()
    ppm.consensusStateStore.StageTips(stagingArea, []*externalapi.DomainHash{pruningPoint})
    return nil
    return staging.CommitAllChanges(ppm.databaseContext, stagingArea)
}
@@ -48,16 +48,12 @@ func (v *transactionValidator) IsFinalizedTransaction(tx *externalapi.DomainTran

// ValidateTransactionInContextIgnoringUTXO validates the transaction with consensus context but ignoring UTXO
func (v *transactionValidator) ValidateTransactionInContextIgnoringUTXO(stagingArea *model.StagingArea, tx *externalapi.DomainTransaction,
    povBlockHash *externalapi.DomainHash) error {
    povBlockHash *externalapi.DomainHash, povBlockPastMedianTime int64) error {

    povBlockDAAScore, err := v.daaBlocksStore.DAAScore(v.databaseContext, stagingArea, povBlockHash)
    if err != nil {
        return err
    }
    povBlockPastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(stagingArea, povBlockHash)
    if err != nil {
        return err
    }
    if isFinalized := v.IsFinalizedTransaction(tx, povBlockDAAScore, povBlockPastMedianTime); !isFinalized {
        return errors.Wrapf(ruleerrors.ErrUnfinalizedTx, "unfinalized transaction %v", tx)
    }
@@ -179,9 +179,9 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
    return bh.clone()
}

func (bh *blockHeader) BlockLevel() int {
func (bh *blockHeader) BlockLevel(maxBlockLevel int) int {
    if !bh.isBlockLevelCached {
        bh.blockLevel = pow.BlockLevel(bh)
        bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel)
        bh.isBlockLevelCached = true
    }
@@ -35,9 +35,4 @@ const (
    // LockTimeThreshold is the number below which a lock time is
    // interpreted to be a DAA score.
    LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC

    // MaxBlockLevel is the maximum possible block level.
    // This is technically 255, but we clamped it at 256 - block level of mainnet genesis
    // This means that any block that has a level lower or equal to genesis will be level 0.
    MaxBlockLevel = 225
)
@@ -0,0 +1,79 @@
package lrucachehashandwindowsizetoblockghostdagdatahashpairs

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

type lruKey struct {
    blockHash  externalapi.DomainHash
    windowSize int
}

func newKey(blockHash *externalapi.DomainHash, windowSize int) lruKey {
    return lruKey{
        blockHash:  *blockHash,
        windowSize: windowSize,
    }
}

// LRUCache is a least-recently-used cache from
// lruKey to *externalapi.BlockGHOSTDAGDataHashPair
type LRUCache struct {
    cache    map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair
    capacity int
}

// New creates a new LRUCache
func New(capacity int, preallocate bool) *LRUCache {
    var cache map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair
    if preallocate {
        cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair, capacity+1)
    } else {
        cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair)
    }
    return &LRUCache{
        cache:    cache,
        capacity: capacity,
    }
}

// Add adds an entry to the LRUCache
func (c *LRUCache) Add(blockHash *externalapi.DomainHash, windowSize int, value []*externalapi.BlockGHOSTDAGDataHashPair) {
    key := newKey(blockHash, windowSize)
    c.cache[key] = value

    if len(c.cache) > c.capacity {
        c.evictRandom()
    }
}

// Get returns the entry for the given key, or (nil, false) otherwise
func (c *LRUCache) Get(blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, bool) {
    key := newKey(blockHash, windowSize)
    value, ok := c.cache[key]
    if !ok {
        return nil, false
    }
    return value, true
}

// Has returns whether the LRUCache contains the given key
func (c *LRUCache) Has(blockHash *externalapi.DomainHash, windowSize int) bool {
    key := newKey(blockHash, windowSize)
    _, ok := c.cache[key]
    return ok
}

// Remove removes the entry for the given key. Does nothing if
// the entry does not exist
func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, windowSize int) {
    key := newKey(blockHash, windowSize)
    delete(c.cache, key)
}

func (c *LRUCache) evictRandom() {
    var keyToEvict lruKey
    for key := range c.cache {
        keyToEvict = key
        break
    }
    c.Remove(&keyToEvict.blockHash, keyToEvict.windowSize)
}
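A usage sketch for the new cache. The import path is assumed from the package name, since only the file body appears in this compare view; note also that, despite the name, eviction removes an arbitrary map key, as evictRandom above shows:

package main

import (
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    // Assumed import path, inferred from the package name above.
    lrucache "github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs"
)

func main() {
    cache := lrucache.New(200, false)
    hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
    window := []*externalapi.BlockGHOSTDAGDataHashPair{}

    // The cache key is the (blockHash, windowSize) pair.
    cache.Add(hash, 2641, window)
    if cached, ok := cache.Get(hash, 2641); ok {
        _ = cached // hit
    }
    if _, ok := cache.Get(hash, 100); !ok {
        // A different window size is a different key, so this is a miss.
    }
}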
@@ -3,7 +3,6 @@ package pow
import (
    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
    "github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
    "github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
    "github.com/kaspanet/kaspad/util/difficulty"
@@ -96,15 +95,15 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
}

// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int {
    // Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
    // block level.
    if len(header.DirectParents()) == 0 {
        return constants.MaxBlockLevel
        return maxBlockLevel
    }

    proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
    level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
    level := maxBlockLevel - proofOfWorkValue.BitLen()
    // If the block has a level lower than genesis make it zero.
    if level < 0 {
        level = 0
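A worked, self-contained example of the formula above, level = maxBlockLevel - bitLen(powValue), clamped at zero; a PoW value that fits in fewer bits (a "luckier" hash) yields a higher block level:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    const maxBlockLevel = 225 // mainnet value from the params hunk below
    powValue := new(big.Int).Lsh(big.NewInt(1), 200) // a 201-bit PoW value
    level := maxBlockLevel - powValue.BitLen()       // 225 - 201 = 24
    if level < 0 {
        level = 0 // any value wider than maxBlockLevel bits clamps to level 0
    }
    fmt.Println(level) // 24
}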
@@ -185,6 +185,9 @@ type Params struct {
    DisallowDirectBlocksOnTopOfGenesis bool

    IgnoreHeaderMass bool

    // MaxBlockLevel is the maximum possible block level.
    MaxBlockLevel int
}

// NormalizeRPCServerAddress returns addr with the current network default
@@ -279,16 +282,20 @@ var MainnetParams = Params{
    PruningProofM:                      defaultPruningProofM,
    DeflationaryPhaseDaaScore:          defaultDeflationaryPhaseDaaScore,
    DisallowDirectBlocksOnTopOfGenesis: true,

    // This is technically 255, but we clamped it at 256 - block level of mainnet genesis
    // This means that any block that has a level lower or equal to genesis will be level 0.
    MaxBlockLevel: 225,
}

// TestnetParams defines the network parameters for the test Kaspa network.
var TestnetParams = Params{
    K:           defaultGHOSTDAGK,
    Name:        "kaspa-testnet-8",
    Name:        "kaspa-testnet-9",
    Net:         appmessage.Testnet,
    RPCPort:     "16210",
    DefaultPort: "16211",
    DNSSeeds:    []string{"testnet-8-dnsseed.daglabs-dev.com"},
    DNSSeeds:    []string{"testnet-9-dnsseed.daglabs-dev.com"},

    // DAG parameters
    GenesisBlock: &testnetGenesisBlock,
@@ -339,6 +346,8 @@ var TestnetParams = Params{
    PruningProofM:             defaultPruningProofM,
    DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
    IgnoreHeaderMass:          true,

    MaxBlockLevel: 250,
}

// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -402,6 +411,8 @@ var SimnetParams = Params{
    CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
    PruningProofM:                           defaultPruningProofM,
    DeflationaryPhaseDaaScore:               defaultDeflationaryPhaseDaaScore,

    MaxBlockLevel: 250,
}

// DevnetParams defines the network parameters for the development Kaspa network.
@@ -462,6 +473,8 @@ var DevnetParams = Params{
    PruningProofM:             defaultPruningProofM,
    DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
    IgnoreHeaderMass:          true,

    MaxBlockLevel: 250,
}

// ErrDuplicateNet describes an error where the parameters for a Kaspa
@@ -9,7 +9,7 @@ import (
)

func (mp *mempool) fillInputsAndGetMissingParents(transaction *externalapi.DomainTransaction) (
    parents model.OutpointToTransactionMap, missingOutpoints []*externalapi.DomainOutpoint, err error) {
    parents model.IDToTransactionMap, missingOutpoints []*externalapi.DomainOutpoint, err error) {

    parentsInPool := mp.transactionsPool.getParentTransactionsInPool(transaction)

@@ -34,9 +34,9 @@ func (mp *mempool) fillInputsAndGetMissingParents(transaction *externalapi.Domai
    return parentsInPool, nil, nil
}

func fillInputs(transaction *externalapi.DomainTransaction, parentsInPool model.OutpointToTransactionMap) {
func fillInputs(transaction *externalapi.DomainTransaction, parentsInPool model.IDToTransactionMap) {
    for _, input := range transaction.Inputs {
        parent, ok := parentsInPool[input.PreviousOutpoint]
        parent, ok := parentsInPool[input.PreviousOutpoint.TransactionID]
        if !ok {
            continue
        }
@@ -7,6 +7,9 @@ import (
// IDToTransactionMap maps transactionID to a MempoolTransaction
type IDToTransactionMap map[externalapi.DomainTransactionID]*MempoolTransaction

// IDToTransactionsSliceMap maps transactionID to a slice of MempoolTransaction
type IDToTransactionsSliceMap map[externalapi.DomainTransactionID][]*MempoolTransaction

// OutpointToUTXOEntryMap maps an outpoint to a UTXOEntry
type OutpointToUTXOEntryMap map[externalapi.DomainOutpoint]externalapi.UTXOEntry
@@ -8,7 +8,7 @@ import (
// MempoolTransaction represents a transaction inside the main TransactionPool
type MempoolTransaction struct {
    transaction              *externalapi.DomainTransaction
    parentTransactionsInPool OutpointToTransactionMap
    parentTransactionsInPool IDToTransactionMap
    isHighPriority           bool
    addedAtDAAScore          uint64
}
@@ -16,7 +16,7 @@ type MempoolTransaction struct {
// NewMempoolTransaction constructs a new MempoolTransaction
func NewMempoolTransaction(
    transaction *externalapi.DomainTransaction,
    parentTransactionsInPool OutpointToTransactionMap,
    parentTransactionsInPool IDToTransactionMap,
    isHighPriority bool,
    addedAtDAAScore uint64,
) *MempoolTransaction {
@@ -39,10 +39,15 @@ func (mt *MempoolTransaction) Transaction() *externalapi.DomainTransaction {
}

// ParentTransactionsInPool returns the parent transactions that exist in the mempool, indexed by transaction ID
func (mt *MempoolTransaction) ParentTransactionsInPool() OutpointToTransactionMap {
func (mt *MempoolTransaction) ParentTransactionsInPool() IDToTransactionMap {
    return mt.parentTransactionsInPool
}

// RemoveParentTransactionInPool deletes a transaction from the parentTransactionsInPool set
func (mt *MempoolTransaction) RemoveParentTransactionInPool(transactionID *externalapi.DomainTransactionID) {
    delete(mt.parentTransactionsInPool, *transactionID)
}

// IsHighPriority returns whether this MempoolTransaction is a high-priority one
func (mt *MempoolTransaction) IsHighPriority() bool {
    return mt.isHighPriority
@@ -27,9 +27,13 @@ func (mp *mempool) removeTransaction(transactionID *externalapi.DomainTransactio
    }

    transactionsToRemove := []*model.MempoolTransaction{mempoolTransaction}
    redeemers := mp.transactionsPool.getRedeemers(mempoolTransaction)
    if removeRedeemers {
        redeemers := mp.transactionsPool.getRedeemers(mempoolTransaction)
        transactionsToRemove = append(transactionsToRemove, redeemers...)
    } else {
        for _, redeemer := range redeemers {
            redeemer.RemoveParentTransactionInPool(transactionID)
        }
    }

    for _, transactionToRemove := range transactionsToRemove {
@@ -1,35 +1,36 @@
package mempool

import (
    "time"

    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
    "github.com/kaspanet/kaspad/domain/miningmanager/mempool/model"
    "time"
)

type transactionsPool struct {
    mempool                               *mempool
    allTransactions                       model.IDToTransactionMap
    highPriorityTransactions              model.IDToTransactionMap
    chainedTransactionsByPreviousOutpoint model.OutpointToTransactionMap
    transactionsOrderedByFeeRate          model.TransactionsOrderedByFeeRate
    lastExpireScanDAAScore                uint64
    lastExpireScanTime                    time.Time
    mempool                       *mempool
    allTransactions               model.IDToTransactionMap
    highPriorityTransactions      model.IDToTransactionMap
    chainedTransactionsByParentID model.IDToTransactionsSliceMap
    transactionsOrderedByFeeRate  model.TransactionsOrderedByFeeRate
    lastExpireScanDAAScore        uint64
    lastExpireScanTime            time.Time
}

func newTransactionsPool(mp *mempool) *transactionsPool {
    return &transactionsPool{
        mempool:                               mp,
        allTransactions:                       model.IDToTransactionMap{},
        highPriorityTransactions:              model.IDToTransactionMap{},
        chainedTransactionsByPreviousOutpoint: model.OutpointToTransactionMap{},
        transactionsOrderedByFeeRate:          model.TransactionsOrderedByFeeRate{},
        lastExpireScanDAAScore:                0,
        lastExpireScanTime:                    time.Now(),
        mempool:                       mp,
        allTransactions:               model.IDToTransactionMap{},
        highPriorityTransactions:      model.IDToTransactionMap{},
        chainedTransactionsByParentID: model.IDToTransactionsSliceMap{},
        transactionsOrderedByFeeRate:  model.TransactionsOrderedByFeeRate{},
        lastExpireScanDAAScore:        0,
        lastExpireScanTime:            time.Now(),
    }
}

func (tp *transactionsPool) addTransaction(transaction *externalapi.DomainTransaction,
    parentTransactionsInPool model.OutpointToTransactionMap, isHighPriority bool) (*model.MempoolTransaction, error) {
    parentTransactionsInPool model.IDToTransactionMap, isHighPriority bool) (*model.MempoolTransaction, error) {

    virtualDAAScore, err := tp.mempool.consensusReference.Consensus().GetVirtualDAAScore()
    if err != nil {
@@ -50,8 +51,13 @@ func (tp *transactionsPool) addTransaction(transaction *externalapi.DomainTransa
func (tp *transactionsPool) addMempoolTransaction(transaction *model.MempoolTransaction) error {
    tp.allTransactions[*transaction.TransactionID()] = transaction

    for outpoint, parentTransactionInPool := range transaction.ParentTransactionsInPool() {
        tp.chainedTransactionsByPreviousOutpoint[outpoint] = parentTransactionInPool
    for _, parentTransactionInPool := range transaction.ParentTransactionsInPool() {
        parentTransactionID := *parentTransactionInPool.TransactionID()
        if tp.chainedTransactionsByParentID[parentTransactionID] == nil {
            tp.chainedTransactionsByParentID[parentTransactionID] = []*model.MempoolTransaction{}
        }
        tp.chainedTransactionsByParentID[parentTransactionID] =
            append(tp.chainedTransactionsByParentID[parentTransactionID], transaction)
    }

    tp.mempool.mempoolUTXOSet.addTransaction(transaction)
@@ -78,9 +84,7 @@ func (tp *transactionsPool) removeTransaction(transaction *model.MempoolTransact

    delete(tp.highPriorityTransactions, *transaction.TransactionID())

    for outpoint := range transaction.ParentTransactionsInPool() {
        delete(tp.chainedTransactionsByPreviousOutpoint, outpoint)
    }
    delete(tp.chainedTransactionsByParentID, *transaction.TransactionID())

    return nil
}
@@ -132,13 +136,13 @@ func (tp *transactionsPool) allReadyTransactions() []*externalapi.DomainTransact
}

func (tp *transactionsPool) getParentTransactionsInPool(
    transaction *externalapi.DomainTransaction) model.OutpointToTransactionMap {
    transaction *externalapi.DomainTransaction) model.IDToTransactionMap {

    parentsTransactionsInPool := model.OutpointToTransactionMap{}
    parentsTransactionsInPool := model.IDToTransactionMap{}

    for _, input := range transaction.Inputs {
        if transaction, ok := tp.allTransactions[input.PreviousOutpoint.TransactionID]; ok {
            parentsTransactionsInPool[input.PreviousOutpoint] = transaction
            parentsTransactionsInPool[*transaction.TransactionID()] = transaction
        }
    }

@@ -153,13 +157,9 @@ func (tp *transactionsPool) getRedeemers(transaction *model.MempoolTransaction)
        last := len(stack) - 1
        current, stack = stack[last], stack[:last]

        outpoint := externalapi.DomainOutpoint{TransactionID: *current.TransactionID()}
        for i := range current.Transaction().Outputs {
            outpoint.Index = uint32(i)
            if redeemerTransaction, ok := tp.chainedTransactionsByPreviousOutpoint[outpoint]; ok {
                stack = append(stack, redeemerTransaction)
                redeemers = append(redeemers, redeemerTransaction)
            }
        for _, redeemerTransaction := range tp.chainedTransactionsByParentID[*current.TransactionID()] {
            stack = append(stack, redeemerTransaction)
            redeemers = append(redeemers, redeemerTransaction)
        }
    }
    return redeemers
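A toy sketch of the new indexing: redeemers are grouped per parent transaction ID, so collecting everything that spends from a transaction is one map lookup per parent instead of probing every (transactionID, outputIndex) outpoint. Strings stand in for transaction IDs:

package main

import "fmt"

func main() {
    // chainedByParentID mirrors chainedTransactionsByParentID above.
    chainedByParentID := map[string][]string{}
    addChained := func(parentID, childID string) {
        chainedByParentID[parentID] = append(chainedByParentID[parentID], childID)
    }

    addChained("tx1", "tx2")
    addChained("tx1", "tx3")

    // Depth-first walk over redeemers, mirroring getRedeemers.
    stack := []string{"tx1"}
    var redeemers []string
    for len(stack) > 0 {
        current := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        for _, child := range chainedByParentID[current] {
            stack = append(stack, child)
            redeemers = append(redeemers, child)
        }
    }
    fmt.Println(redeemers) // [tx2 tx3]
}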
@@ -12,7 +12,7 @@ import (

const (
    // DefaultMaxMessages is the default capacity for a route with a capacity defined
    DefaultMaxMessages = 100
    DefaultMaxMessages = 1000
)

var (
|
||||
// *KaspadMessage_VirtualDaaScoreChangedNotification
|
||||
// *KaspadMessage_GetBalanceByAddressRequest
|
||||
// *KaspadMessage_GetBalanceByAddressResponse
|
||||
// *KaspadMessage_GetBalancesByAddressesRequest
|
||||
// *KaspadMessage_GetBalancesByAddressesResponse
|
||||
Payload isKaspadMessage_Payload `protobuf_oneof:"payload"`
|
||||
}
|
||||
|
||||
@@ -1009,6 +1011,20 @@ func (x *KaspadMessage) GetGetBalanceByAddressResponse() *GetBalanceByAddressRes
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *KaspadMessage) GetGetBalancesByAddressesRequest() *GetBalancesByAddressesRequestMessage {
|
||||
if x, ok := x.GetPayload().(*KaspadMessage_GetBalancesByAddressesRequest); ok {
|
||||
return x.GetBalancesByAddressesRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *KaspadMessage) GetGetBalancesByAddressesResponse() *GetBalancesByAddressesResponseMessage {
|
||||
if x, ok := x.GetPayload().(*KaspadMessage_GetBalancesByAddressesResponse); ok {
|
||||
return x.GetBalancesByAddressesResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isKaspadMessage_Payload interface {
|
||||
isKaspadMessage_Payload()
|
||||
}
|
||||
@@ -1481,6 +1497,14 @@ type KaspadMessage_GetBalanceByAddressResponse struct {
|
||||
GetBalanceByAddressResponse *GetBalanceByAddressResponseMessage `protobuf:"bytes,1078,opt,name=getBalanceByAddressResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
type KaspadMessage_GetBalancesByAddressesRequest struct {
|
||||
GetBalancesByAddressesRequest *GetBalancesByAddressesRequestMessage `protobuf:"bytes,1079,opt,name=getBalancesByAddressesRequest,proto3,oneof"`
|
||||
}
|
||||
|
||||
type KaspadMessage_GetBalancesByAddressesResponse struct {
|
||||
GetBalancesByAddressesResponse *GetBalancesByAddressesResponseMessage `protobuf:"bytes,1080,opt,name=getBalancesByAddressesResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*KaspadMessage_Addresses) isKaspadMessage_Payload() {}
|
||||
|
||||
func (*KaspadMessage_Block) isKaspadMessage_Payload() {}
|
||||
@@ -1715,13 +1739,17 @@ func (*KaspadMessage_GetBalanceByAddressRequest) isKaspadMessage_Payload() {}
|
||||
|
||||
func (*KaspadMessage_GetBalanceByAddressResponse) isKaspadMessage_Payload() {}
|
||||
|
||||
func (*KaspadMessage_GetBalancesByAddressesRequest) isKaspadMessage_Payload() {}
|
||||
|
||||
func (*KaspadMessage_GetBalancesByAddressesResponse) isKaspadMessage_Payload() {}
|
||||
|
||||
var File_messages_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_messages_proto_rawDesc = []byte{
|
||||
0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x1a, 0x09, 0x70, 0x32, 0x70,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x22, 0xb8, 0x61, 0x0a, 0x0d, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73,
|
||||
0x6f, 0x22, 0xaf, 0x63, 0x0a, 0x0d, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73,
|
||||
0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69,
|
||||
0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73,
|
||||
@@ -2500,20 +2528,36 @@ var file_messages_proto_rawDesc = []byte{
|
||||
0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
|
||||
0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x50, 0x0a, 0x03,
|
||||
0x50, 0x32, 0x50, 0x12, 0x49, 0x0a, 0x0d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74,
|
||||
0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65,
|
||||
0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x18,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61,
|
||||
0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0x50,
|
||||
0x0a, 0x03, 0x52, 0x50, 0x43, 0x12, 0x49, 0x0a, 0x0d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69,
|
||||
0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73,
|
||||
0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
|
||||
0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b,
|
||||
0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x65, 0x12, 0x78, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73,
|
||||
0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x18, 0xb7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
|
||||
0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1d, 0x67, 0x65,
|
||||
0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65,
|
||||
0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x7b, 0x0a, 0x1e, 0x67,
|
||||
0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72,
|
||||
0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xb8, 0x08,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64,
|
||||
0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1e, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c,
0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x32, 0x50, 0x0a, 0x03, 0x50, 0x32, 0x50, 0x12, 0x49, 0x0a, 0x0d, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72,
0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0x50, 0x0a, 0x03, 0x52, 0x50, 0x43, 0x12, 0x49, 0x0a, 0x0d,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77,
0x69, 0x72, 0x65, 0x2e, 0x4b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b,
0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2647,6 +2691,8 @@ var file_messages_proto_goTypes = []interface{}{
(*VirtualDaaScoreChangedNotificationMessage)(nil), // 114: protowire.VirtualDaaScoreChangedNotificationMessage
(*GetBalanceByAddressRequestMessage)(nil), // 115: protowire.GetBalanceByAddressRequestMessage
(*GetBalanceByAddressResponseMessage)(nil), // 116: protowire.GetBalanceByAddressResponseMessage
(*GetBalancesByAddressesRequestMessage)(nil), // 117: protowire.GetBalancesByAddressesRequestMessage
(*GetBalancesByAddressesResponseMessage)(nil), // 118: protowire.GetBalancesByAddressesResponseMessage
}
var file_messages_proto_depIdxs = []int32{
1, // 0: protowire.KaspadMessage.addresses:type_name -> protowire.AddressesMessage
@@ -2766,15 +2812,17 @@ var file_messages_proto_depIdxs = []int32{
114, // 114: protowire.KaspadMessage.virtualDaaScoreChangedNotification:type_name -> protowire.VirtualDaaScoreChangedNotificationMessage
115, // 115: protowire.KaspadMessage.getBalanceByAddressRequest:type_name -> protowire.GetBalanceByAddressRequestMessage
116, // 116: protowire.KaspadMessage.getBalanceByAddressResponse:type_name -> protowire.GetBalanceByAddressResponseMessage
0, // 117: protowire.P2P.MessageStream:input_type -> protowire.KaspadMessage
0, // 118: protowire.RPC.MessageStream:input_type -> protowire.KaspadMessage
0, // 119: protowire.P2P.MessageStream:output_type -> protowire.KaspadMessage
0, // 120: protowire.RPC.MessageStream:output_type -> protowire.KaspadMessage
119, // [119:121] is the sub-list for method output_type
117, // [117:119] is the sub-list for method input_type
117, // [117:117] is the sub-list for extension type_name
117, // [117:117] is the sub-list for extension extendee
0, // [0:117] is the sub-list for field type_name
117, // 117: protowire.KaspadMessage.getBalancesByAddressesRequest:type_name -> protowire.GetBalancesByAddressesRequestMessage
118, // 118: protowire.KaspadMessage.getBalancesByAddressesResponse:type_name -> protowire.GetBalancesByAddressesResponseMessage
0, // 119: protowire.P2P.MessageStream:input_type -> protowire.KaspadMessage
0, // 120: protowire.RPC.MessageStream:input_type -> protowire.KaspadMessage
0, // 121: protowire.P2P.MessageStream:output_type -> protowire.KaspadMessage
0, // 122: protowire.RPC.MessageStream:output_type -> protowire.KaspadMessage
121, // [121:123] is the sub-list for method output_type
119, // [119:121] is the sub-list for method input_type
119, // [119:119] is the sub-list for extension type_name
119, // [119:119] is the sub-list for extension extendee
0, // [0:119] is the sub-list for field type_name
}
func init() { file_messages_proto_init() }
@@ -2916,6 +2964,8 @@ func file_messages_proto_init() {
(*KaspadMessage_VirtualDaaScoreChangedNotification)(nil),
(*KaspadMessage_GetBalanceByAddressRequest)(nil),
(*KaspadMessage_GetBalanceByAddressResponse)(nil),
(*KaspadMessage_GetBalancesByAddressesRequest)(nil),
(*KaspadMessage_GetBalancesByAddressesResponse)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -126,6 +126,8 @@ message KaspadMessage {
VirtualDaaScoreChangedNotificationMessage virtualDaaScoreChangedNotification = 1076;
GetBalanceByAddressRequestMessage getBalanceByAddressRequest = 1077;
GetBalanceByAddressResponseMessage getBalanceByAddressResponse = 1078;
GetBalancesByAddressesRequestMessage getBalancesByAddressesRequest = 1079;
GetBalancesByAddressesResponseMessage getBalancesByAddressesResponse = 1080;
}
}
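
The wire-level shape of the new call follows from the `KaspadMessage` oneof above: two new payload cases, field numbers 1079 and 1080. As a minimal sketch of building a request, assuming the generated Go bindings follow standard protoc-gen-go conventions (wrapper types named after the oneof cases, as registered in `file_messages_proto_init` above) and the `go_package` declared in the raw descriptor (`github.com/kaspanet/kaspad/protowire`):

```go
package main

import (
	"fmt"

	// go_package as declared in the raw descriptor; the generated file's
	// actual location inside the repository may differ.
	"github.com/kaspanet/kaspad/protowire"
)

func main() {
	// Wrap the new request message in the KaspadMessage payload oneof,
	// using the generated oneof wrapper type listed above.
	msg := &protowire.KaspadMessage{
		Payload: &protowire.KaspadMessage_GetBalancesByAddressesRequest{
			GetBalancesByAddressesRequest: &protowire.GetBalancesByAddressesRequestMessage{
				Addresses: []string{"kaspa:exampleaddress"}, // placeholder
			},
		},
	}
	fmt.Println(msg.String())
}
```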
@@ -79,6 +79,9 @@
- [GetUtxosByAddressesResponseMessage](#protowire.GetUtxosByAddressesResponseMessage)
- [GetBalanceByAddressRequestMessage](#protowire.GetBalanceByAddressRequestMessage)
- [GetBalanceByAddressResponseMessage](#protowire.GetBalanceByAddressResponseMessage)
- [GetBalancesByAddressesRequestMessage](#protowire.GetBalancesByAddressesRequestMessage)
- [BalancesByAddressEntry](#protowire.BalancesByAddressEntry)
- [GetBalancesByAddressesResponseMessage](#protowire.GetBalancesByAddressesResponseMessage)
- [GetVirtualSelectedParentBlueScoreRequestMessage](#protowire.GetVirtualSelectedParentBlueScoreRequestMessage)
- [GetVirtualSelectedParentBlueScoreResponseMessage](#protowire.GetVirtualSelectedParentBlueScoreResponseMessage)
- [NotifyVirtualSelectedParentBlueScoreChangedRequestMessage](#protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage)
@@ -211,6 +214,9 @@ Receivers of any ResponseMessage are expected to check whether its error field i
| isHeaderOnly | [bool](#bool) | | |
| blueScore | [uint64](#uint64) | | |
| childrenHashes | [string](#string) | repeated | |
| mergeSetBluesHashes | [string](#string) | repeated | |
| mergeSetRedsHashes | [string](#string) | repeated | |
| isChainBlock | [bool](#bool) | | |
@@ -410,6 +416,7 @@ See: GetBlockTemplateRequestMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| block | [RpcBlock](#protowire.RpcBlock) | | |
| allowNonDAABlocks | [bool](#bool) | | |
@@ -1328,6 +1335,54 @@ This call is only available when this kaspad was started with `--utxoindex`
<a name="protowire.GetBalancesByAddressesRequestMessage"></a>
### GetBalancesByAddressesRequestMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| addresses | [string](#string) | repeated | |
<a name="protowire.BalancesByAddressEntry"></a>
### BalancesByAddressEntry
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| address | [string](#string) | | |
| balance | [uint64](#uint64) | | |
| error | [RPCError](#protowire.RPCError) | | |
<a name="protowire.GetBalancesByAddressesResponseMessage"></a>
### GetBalancesByAddressesResponseMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| entries | [BalancesByAddressEntry](#protowire.BalancesByAddressEntry) | repeated | |
| error | [RPCError](#protowire.RPCError) | | |
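
Taken together, the three messages above define the full round trip: a batch of addresses in, one `BalancesByAddressEntry` per address out, so each balance stays paired with the address it belongs to. Below is a hedged client-side sketch; the `rpcclient` import path, the `NewRPCClient` constructor, and the `GetBalancesByAddresses` helper are assumptions based on this repository's conventions, not something this diff confirms:

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path for kaspad's high-level RPC client wrapper.
	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)

func main() {
	// The node must run with --utxoindex for balance calls to be
	// available; 16110 is kaspad's default mainnet RPC port.
	client, err := rpcclient.NewRPCClient("localhost:16110")
	if err != nil {
		log.Fatal(err)
	}

	response, err := client.GetBalancesByAddresses([]string{
		"kaspa:exampleaddress1", // placeholder addresses
		"kaspa:exampleaddress2",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each entry pairs an address with its balance (in sompi).
	for _, entry := range response.Entries {
		fmt.Printf("%s: %d\n", entry.Address, entry.Balance)
	}
}
```

Note the per-entry `error` field in `BalancesByAddressEntry` above: it suggests a single bad address can be reported without failing the whole batch, so a careful client would check it before trusting the paired balance.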
<a name="protowire.GetVirtualSelectedParentBlueScoreRequestMessage"></a>
### GetVirtualSelectedParentBlueScoreRequestMessage