Merge branch 'dev' into add-tx-index

D-Stacks committed on 2022-06-30 00:40:40 +02:00 (via GitHub)
commit edb6962e40
16 changed files with 113 additions and 17 deletions

View File

@@ -5,7 +5,6 @@ import (
     "github.com/kaspanet/kaspad/app/protocol/peer"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )

@@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
     if err != nil {
         return err
     }
-    if !blockInfo.Exists {
+    if !blockInfo.HasHeader() {
         return protocolerrors.Errorf(true, "received IBDBlockLocator "+
             "with an unknown targetHash %s", targetHash)
     }

@@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
         }
         // The IBD block locator is checking only existing blocks with bodies.
-        if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
+        if !blockInfo.HasBody() {
             continue
         }

View File

@@ -4,7 +4,6 @@ import (
     "github.com/kaspanet/kaspad/app/appmessage"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "github.com/pkg/errors"
 )

@@ -32,7 +31,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
         if err != nil {
             return err
         }
-        if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
+        if !blockInfo.HasBody() {
             return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
         }
         block, err := context.Domain().Consensus().GetBlock(hash)

View File

@@ -5,7 +5,6 @@ import (
     peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
     "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
     "github.com/kaspanet/kaspad/domain"
-    "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
     "github.com/pkg/errors"
 )

@@ -33,7 +32,7 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
         if err != nil {
             return err
         }
-        if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
+        if !blockInfo.HasBody() {
             return protocolerrors.Errorf(true, "block %s not found", hash)
         }
         block, err := context.Domain().Consensus().GetBlock(hash)

View File

@@ -47,9 +47,9 @@ func (flow *handleRequestAnticoneFlow) start() error {
     // GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
     // intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
-    // we relay blocks only if they enter virtual's mergeset. We add 2 for a small margin error.
+    // we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
     blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
-        flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
+        flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
     if err != nil {
         return protocolerrors.Wrap(true, err, "Failed querying anticone")
     }

View File

@@ -46,7 +46,25 @@ func (flow *handleRequestHeadersFlow) start() error {
     }
     log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)

-    isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
+    consensus := flow.Domain().Consensus()
+
+    lowHashInfo, err := consensus.GetBlockInfo(lowHash)
+    if err != nil {
+        return err
+    }
+    if !lowHashInfo.HasHeader() {
+        return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
+    }
+
+    highHashInfo, err := consensus.GetBlockInfo(highHash)
+    if err != nil {
+        return err
+    }
+    if !highHashInfo.HasHeader() {
+        return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
+    }
+
+    isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
     if err != nil {
         return err
     }

@@ -62,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
         // in order to avoid locking the consensus for too long
         // maxBlocks MUST be >= MergeSetSizeLimit + 1
         const maxBlocks = 1 << 10
-        blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
+        blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
         if err != nil {
             return err
         }

@@ -70,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
         blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
         for i, blockHash := range blockHashes {
-            blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
+            blockHeader, err := consensus.GetBlockHeader(blockHash)
             if err != nil {
                 return err
             }

View File

@@ -9,6 +9,14 @@ import (
 // HandleAddPeer handles the respectively named RPC command
 func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
+        response := appmessage.NewAddPeerResponseMessage()
+        response.Error =
+            appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
     address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
     if err != nil {

View File

@@ -9,6 +9,14 @@ import (
 // HandleBan handles the respectively named RPC command
 func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
+        response := appmessage.NewBanResponseMessage()
+        response.Error =
+            appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     banRequest := request.(*appmessage.BanRequestMessage)
     ip := net.ParseIP(banRequest.IP)
     if ip == nil {

View File

@@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
         }
     }

+    if context.Config.SafeRPC {
+        const windowSizeLimit = 10000
+        if windowSize > windowSizeLimit {
+            response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
+            response.Error =
+                appmessage.RPCErrorf(
+                    "Requested window size %d is larger than max allowed in RPC safe mode (%d)",
+                    windowSize, windowSizeLimit)
+            return response, nil
+        }
+    }
+
+    if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
+        response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
+        response.Error =
+            appmessage.RPCErrorf(
+                "Requested window size %d is larger than pruning point depth %d",
+                windowSize, context.Config.ActiveNetParams.PruningDepth())
+        return response, nil
+    }
+
     networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
     if err != nil {
         response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
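The two new checks above are independent: the 10,000-block cap applies only when the node runs in safe RPC mode, while the pruning-depth cap always applies. A self-contained sketch of that decision logic, using plain values instead of the real config types (the pruning depth used in main is made up for illustration):

package main

import "fmt"

// validateWindowSize mirrors the two checks added to the handler, with plain
// values instead of the real config; the pruning depth passed in main is made up.
func validateWindowSize(safeRPC bool, windowSize, pruningDepth uint64) error {
	const safeModeWindowSizeLimit = 10000
	if safeRPC && windowSize > safeModeWindowSizeLimit {
		return fmt.Errorf("requested window size %d is larger than max allowed in RPC safe mode (%d)",
			windowSize, safeModeWindowSizeLimit)
	}
	if windowSize > pruningDepth {
		return fmt.Errorf("requested window size %d is larger than pruning point depth %d",
			windowSize, pruningDepth)
	}
	return nil
}

func main() {
	fmt.Println(validateWindowSize(true, 50000, 200000))  // rejected by the safe-mode cap
	fmt.Println(validateWindowSize(false, 50000, 200000)) // allowed: prints <nil>
}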

View File

@@ -43,7 +43,7 @@ func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.R
     if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {
         orphanPoolTransactions := context.Domain.MiningManager().AllOrphanTransactions()
-        orphanPoolEntriesByAddresse, err := extractMempoolEntriesByAddressesFromTransactions(
+        orphanPoolEntriesByAddress, err := extractMempoolEntriesByAddressesFromTransactions(
             context,
             getMempoolEntriesByAddressesRequest.Addresses,
             orphanPoolTransactions,

@@ -59,7 +59,7 @@ func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.R
             return errorMessage, nil
         }
-        mempoolEntriesByAddresses = append(mempoolEntriesByAddresses, orphanPoolEntriesByAddresse...)
+        mempoolEntriesByAddresses = append(mempoolEntriesByAddresses, orphanPoolEntriesByAddress...)
     }

     return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil

@@ -80,9 +80,13 @@ func extractMempoolEntriesByAddressesFromTransactions(context *rpccontext.Contex
     for _, transaction := range transactions {
         for i, input := range transaction.Inputs {
-            // TODO: Fix this
             if input.UTXOEntry == nil {
-                log.Errorf("Couldn't find UTXO entry for input %d in mempool transaction %s. This is a bug and should be fixed.", i, consensushashing.TransactionID(transaction))
+                if !areOrphans { // Orphans can legitimately have `input.UTXOEntry == nil`
+                    // TODO: Fix the underlying cause of the bug for non-orphan entries
+                    log.Debugf(
+                        "Couldn't find UTXO entry for input %d in mempool transaction %s. This is a bug and should be fixed.",
+                        i, consensushashing.TransactionID(transaction))
+                }
                 continue
             }
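The new branch consults an `areOrphans` flag that is not visible in this hunk; presumably `extractMempoolEntriesByAddressesFromTransactions` gained a boolean parameter distinguishing orphan-pool transactions, whose inputs legitimately have no UTXO entry yet, from regular mempool entries. A self-contained sketch of the behavioral change, with stand-in types rather than the real kaspad ones:

package main

import "log"

// Stand-in types; the real code iterates externalapi.DomainTransaction values.
type UTXOEntry struct{}

type Input struct{ UTXOEntry *UTXOEntry }

type Transaction struct{ Inputs []*Input }

// collectSpendableInputs mirrors the new logic: inputs without a UTXO entry are
// skipped, and the "missing entry" message is only logged for non-orphan
// transactions, since orphan inputs legitimately have no UTXO entry yet.
func collectSpendableInputs(transactions []*Transaction, areOrphans bool) []*Input {
	var withEntries []*Input
	for _, transaction := range transactions {
		for i, input := range transaction.Inputs {
			if input.UTXOEntry == nil {
				if !areOrphans {
					log.Printf("couldn't find UTXO entry for input %d; this is a bug", i)
				}
				continue
			}
			withEntries = append(withEntries, input)
		}
	}
	return withEntries
}

func main() {
	missing := &Transaction{Inputs: []*Input{{UTXOEntry: nil}}}
	_ = collectSpendableInputs([]*Transaction{missing}, true)  // orphan pool: silent
	_ = collectSpendableInputs([]*Transaction{missing}, false) // regular mempool: logs the message
}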

View File

@@ -8,6 +8,14 @@ import (
 // HandleResolveFinalityConflict handles the respectively named RPC command
 func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
+        response := &appmessage.ResolveFinalityConflictResponseMessage{}
+        response.Error =
+            appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     response := &appmessage.ResolveFinalityConflictResponseMessage{}
     response.Error = appmessage.RPCErrorf("not implemented")
     return response, nil

View File

@@ -12,6 +12,14 @@ const pauseBeforeShutDown = time.Second
 // HandleShutDown handles the respectively named RPC command
 func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
+        response := appmessage.NewShutDownResponseMessage()
+        response.Error =
+            appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     log.Warn("ShutDown RPC called.")

     // Wait a second before shutting down, to allow time to return the response to the caller

View File

@@ -9,6 +9,14 @@ import (
 // HandleUnban handles the respectively named RPC command
 func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+    if context.Config.SafeRPC {
+        log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
+        response := appmessage.NewUnbanResponseMessage()
+        response.Error =
+            appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
+        return response, nil
+    }
+
     unbanRequest := request.(*appmessage.UnbanRequestMessage)
     ip := net.ParseIP(unbanRequest.IP)
     if ip == nil {

View File

@@ -1,3 +1,8 @@
+Kaspad v0.12.3 - 2022-06-29
+===========================
+* Fixes a few bugs which can lead to node crashes or out-of-memory errors
+
 Kaspad v0.12.2 - 2022-06-17
 ===========================

View File

@@ -13,6 +13,16 @@ type BlockInfo struct {
     MergeSetReds []*DomainHash
 }

+// HasHeader returns whether the block exists and has a valid header
+func (bi *BlockInfo) HasHeader() bool {
+    return bi.Exists && bi.BlockStatus != StatusInvalid
+}
+
+// HasBody returns whether the block exists and has a valid body
+func (bi *BlockInfo) HasBody() bool {
+    return bi.Exists && bi.BlockStatus != StatusInvalid && bi.BlockStatus != StatusHeaderOnly
+}
+
 // Clone returns a clone of BlockInfo
 func (bi *BlockInfo) Clone() *BlockInfo {
     return &BlockInfo{
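These two helpers are what the block-relay flows earlier in this diff switch to, replacing the inline `!blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly` checks. A self-contained sketch with stand-in types (the real `BlockStatus` constants live in `externalapi`; `StatusUTXOValid` here is only an illustrative third status):

package main

import "fmt"

// Stand-in types for illustration only; the real definitions live in
// github.com/kaspanet/kaspad/domain/consensus/model/externalapi.
type BlockStatus int

const (
	StatusInvalid BlockStatus = iota
	StatusHeaderOnly
	StatusUTXOValid // illustrative "has a body" status
)

type BlockInfo struct {
	Exists      bool
	BlockStatus BlockStatus
}

// HasHeader mirrors the helper added above: the block exists and is not invalid.
func (bi *BlockInfo) HasHeader() bool {
	return bi.Exists && bi.BlockStatus != StatusInvalid
}

// HasBody mirrors the helper added above: the block also has a valid body.
func (bi *BlockInfo) HasBody() bool {
	return bi.Exists && bi.BlockStatus != StatusInvalid && bi.BlockStatus != StatusHeaderOnly
}

func main() {
	headerOnly := &BlockInfo{Exists: true, BlockStatus: StatusHeaderOnly}
	// The old inline check `!blockInfo.Exists || blockInfo.BlockStatus == StatusHeaderOnly`
	// and the new `!blockInfo.HasBody()` agree: a header-only block has a header but no body.
	fmt.Println(headerOnly.HasHeader(), headerOnly.HasBody()) // true false
}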

View File

@@ -98,6 +98,7 @@ type Flags struct {
     RPCMaxWebsockets     int    `long:"rpcmaxwebsockets" description:"Max number of RPC websocket connections"`
     RPCMaxConcurrentReqs int    `long:"rpcmaxconcurrentreqs" description:"Max number of concurrent RPC requests that may be processed concurrently"`
     DisableRPC           bool   `long:"norpc" description:"Disable built-in RPC server"`
+    SafeRPC              bool   `long:"saferpc" description:"Disable RPC commands which affect the state of the node"`
     DisableDNSSeed       bool   `long:"nodnsseed" description:"Disable DNS seeding for peers"`
     DNSSeed              string `long:"dnsseed" description:"Override DNS seeds with specified hostname (Only 1 hostname allowed)"`
     GRPCSeed             string `long:"grpcseed" description:"Hostname of gRPC server for seeding peers"`
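This flag is what the `context.Config.SafeRPC` checks in the RPC handlers above read; an operator enables the mode by starting the node with `--saferpc`. Each state-changing handler in this commit (AddPeer, Ban, ResolveFinalityConflict, ShutDown, Unban) repeats the same guard against the flag. A self-contained sketch of that guard, using stand-in types rather than the real `rpccontext`/`appmessage` ones:

package main

import "fmt"

// Stand-ins for the real rpccontext/appmessage types, for illustration only.
type Config struct{ SafeRPC bool }

type RPCError struct{ Message string }

type BanResponse struct{ Error *RPCError }

// handleBan mirrors the guard each state-changing handler now starts with:
// in safe RPC mode the command is logged, answered with an RPC error, and
// the node's state is left untouched.
func handleBan(cfg *Config, ip string) *BanResponse {
	if cfg.SafeRPC {
		fmt.Println("Ban RPC command called while node in safe RPC mode -- ignoring.")
		return &BanResponse{Error: &RPCError{Message: "Ban RPC command called while node in safe RPC mode"}}
	}
	// ... the normal ban logic would run here ...
	return &BanResponse{}
}

func main() {
	resp := handleBan(&Config{SafeRPC: true}, "203.0.113.7")
	fmt.Println(resp.Error.Message)
}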

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
 const (
     appMajor uint = 0
     appMinor uint = 12
-    appPatch uint = 2
+    appPatch uint = 3
 )

 // appBuild is defined as a variable so it can be overridden during the build