Compare commits

..

17 Commits

Author SHA1 Message Date
msutton
9df231f810 added relay hash to the log print 2022-03-08 09:29:00 +02:00
msutton
09cebe6960 Perform side-chain check earlier to avoid IBD start 2022-03-08 09:18:24 +02:00
msutton
7c327683d3 route capacity workaround (for new syncing nodes) 2022-03-08 09:12:53 +02:00
msutton
c903a65def a temp patch for fixing IBD issues for all side-chains 2022-03-08 03:51:06 +02:00
msutton
685c049a12 yet another checkpoint 2022-03-07 15:51:59 +02:00
msutton
9b45e803d0 Merge branch 'dev' into patch 2022-03-07 14:54:57 +02:00
msutton
cb5e9b55b7 Update checkpoint to yet another side-chain 2022-03-07 14:54:05 +02:00
Ori Newman
190e725dd0 Optimize expected header pruning point (#1962)
* Use the correct heuristic to avoid checking for next pruning point movement when not needed
2022-03-07 00:16:29 +02:00
msutton
20f16cf729 Update checkpoint to new side-chain 2022-03-06 01:03:44 +02:00
Ori Newman
4d3f504b73 Check checkpoint only if highestSharedBlockFound 2022-03-02 21:17:46 +02:00
Ori Newman
b5eda33488 remove count 2022-03-02 13:11:32 +02:00
Ori Newman
ef1a3c0dce remove debug log 2022-03-02 13:09:34 +02:00
Ori Newman
1cedc720ac patch 2022-03-02 12:31:14 +02:00
Ori Newman
6449b03034 Ignore transaction invs on IBD (#1960)
* Ignore transaction invs on IBD

* Add IsIBDRunning mock to TestHandleRelayedTransactionsNotFound

Co-authored-by: Ori Newman <>
2022-02-26 22:20:08 +02:00
Ori Newman
9f02a24e8b Add merge set and IsChainBlock to the RPC (#1961)
* Add merge set and IsChainBlock to the RPC

* Fix BlockInfo.Clone()
2022-02-25 16:22:00 +02:00
Isaac Cook
9b23bbcdb5 kaspactl: string slice deser for GetUtxosByAddresses (#1955)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-02-24 00:40:01 +02:00
stasatdaglabs
b30f7309a2 Implement a parse sub command in the wallet (#1953)
* Add boilerplate for the `parse` sub command.

* Deserialize the given transaction hex.

* Implement the rest of the wallet parse command.

* Hide transaction inputs behind a `verbose` flag.

* Indicate that we aren't able to extract an address out of a nonstandard transaction.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-02-20 22:12:23 +02:00
22 changed files with 1048 additions and 723 deletions

View File

@@ -92,11 +92,14 @@ type RPCBlockLevelParents struct {
// RPCBlockVerboseData holds verbose data about a block
type RPCBlockVerboseData struct {
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
Hash string
Difficulty float64
SelectedParentHash string
TransactionIDs []string
IsHeaderOnly bool
BlueScore uint64
ChildrenHashes []string
MergeSetBluesHashes []string
MergeSetRedsHashes []string
IsChainBlock bool
}
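The fields appended to this struct are plain string slices and a bool, so RPC consumers can read them straight off an already-populated message. A minimal, purely illustrative sketch (helper name and package are hypothetical; it assumes the appmessage import path used throughout the repo):

```go
package rpcsketch

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// describeBlock is an illustrative helper, not part of the change: it summarizes the
// merge-set and chain-membership fields added to RPCBlockVerboseData.
func describeBlock(v *appmessage.RPCBlockVerboseData) string {
	return fmt.Sprintf("block %s: isChainBlock=%t, %d merge-set blues, %d merge-set reds",
		v.Hash, v.IsChainBlock, len(v.MergeSetBluesHashes), len(v.MergeSetRedsHashes))
}
```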

View File

@@ -13,7 +13,9 @@ import (
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math/big"
"time"
)
@@ -64,6 +66,29 @@ func (flow *handleIBDFlow) start() error {
}
func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error {
highHash := consensushashing.BlockHash(block)
// Temp code to avoid IBD from lagging nodes publishing their side-chain
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
if err == nil {
virtualSelectedParentHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualSelectedParent)
if err == nil {
if virtualSelectedParentHeader.DAAScore() > block.Header.DAAScore()+2641 {
virtualDifficulty := difficulty.CalcWork(virtualSelectedParentHeader.Bits())
var virtualSub, difficultyMul big.Int
if difficultyMul.Mul(virtualDifficulty, big.NewInt(180)).
Cmp(virtualSub.Sub(virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())) < 0 {
log.Criticalf("Avoiding IBD triggered by relay %s because it is coming from " +
"a deep (%d DAA score depth) side-chain which has much lower blue work (%d, %d)",
highHash,
virtualSelectedParentHeader.DAAScore()-block.Header.DAAScore(),
virtualSelectedParentHeader.BlueWork(), block.Header.BlueWork())
return nil
}
}
}
}
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
@@ -76,15 +101,14 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
flow.logIBDFinished(isFinishedSuccessfully)
}()
highHash := consensushashing.BlockHash(block)
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Debugf("Syncing blocks up to %s", highHash)
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
log.Criticalf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Criticalf("Syncing blocks up to %s", highHash)
log.Criticalf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
log.Criticalf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
if err != nil {
@@ -324,7 +348,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
return nil
}
for _, header := range ibdBlocksMessage.BlockHeaders {
err = flow.processHeader(consensus, header)
_, err := flow.processHeader(consensus, header)
if err != nil {
return err
}
@@ -365,7 +389,7 @@ func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeader
}
}
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error {
func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) (bool, error) {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
@@ -375,27 +399,26 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
blockHash := consensushashing.BlockHash(block)
blockInfo, err := consensus.GetBlockInfo(blockHash)
if err != nil {
return err
return false, err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
return false, nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
return false, errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
return false, protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
return true, nil
}
func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error {

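The side-chain filter added near the top of runIBDIfNotRunning reduces to a single predicate: skip IBD when the relayed block sits more than 2641 DAA score below the virtual selected parent and its blue work trails the virtual's by more than 180 times the virtual's difficulty. A standalone sketch of that predicate under those assumptions (names and package are illustrative; the real flow reads these values through the consensus getters shown above):

```go
package ibdsketch

import "math/big"

// isDeepLowWorkSideChain restates the workaround's check: it returns true when IBD
// triggered by the relayed block should be skipped.
func isDeepLowWorkSideChain(virtualDAAScore, relayDAAScore uint64,
	virtualBlueWork, relayBlueWork, virtualDifficulty *big.Int) bool {

	const daaDepthThreshold = 2641 // DAA-score depth hard-coded by the patch
	if virtualDAAScore <= relayDAAScore+daaDepthThreshold {
		return false // the relay is not deep enough below the virtual to matter
	}
	blueWorkGap := new(big.Int).Sub(virtualBlueWork, relayBlueWork)
	threshold := new(big.Int).Mul(virtualDifficulty, big.NewInt(180))
	// Skip only when the blue-work gap exceeds 180 times the virtual's difficulty.
	return threshold.Cmp(blueWorkGap) < 0
}
```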
View File

@@ -22,6 +22,7 @@ type TransactionsRelayContext interface {
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
OnTransactionAddedToMempool()
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
IsIBDRunning() bool
}
type handleRelayedTransactionsFlow struct {
@@ -49,6 +50,10 @@ func (flow *handleRelayedTransactionsFlow) start() error {
return err
}
if flow.IsIBDRunning() {
continue
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err

View File

@@ -47,6 +47,10 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
}
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
return false
}
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
// have the requested transactions in the mempool.
func TestHandleRelayedTransactionsNotFound(t *testing.T) {

View File

@@ -56,21 +56,29 @@ func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, dom
"invalid block")
}
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
_, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
if err != nil {
return err
}
isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash)
if err != nil {
return err
}
block.VerboseData = &appmessage.RPCBlockVerboseData{
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
Hash: blockHash.String(),
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
ChildrenHashes: hashes.ToStrings(childrenHashes),
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
BlueScore: blockInfo.BlueScore,
MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues),
MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds),
IsChainBlock: isChainBlock,
}
// selectedParentHash will be nil in the genesis block
if selectedParentHash != nil {
block.VerboseData.SelectedParentHash = selectedParentHash.String()
if blockInfo.SelectedParent != nil {
block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String()
}
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {

View File

@@ -3,6 +3,7 @@ package main
import (
"reflect"
"strconv"
"strings"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
@@ -149,12 +150,24 @@ func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflec
value = pointer.Interface()
case reflect.Slice:
sliceType := parameterDesc.typeof.Elem()
if sliceType.Kind() != reflect.String {
return reflect.Value{},
errors.Errorf("Unsupported slice type '%s' for parameter '%s'",
sliceType,
parameterDesc.name)
}
if valueStr == "" {
value = []string{}
} else {
value = strings.Split(valueStr, ",")
}
// Int and uint are not supported because their size is platform-dependent
case reflect.Int,
reflect.Uint,
// Other types are not supported simply because they are not used in any command right now
// but support can be added if and when needed
reflect.Slice,
reflect.Func,
reflect.Interface,
reflect.Map,

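The string-slice branch above is a guarded strings.Split with an empty-input special case, which is what lets GetUtxosByAddresses take its address list as a single comma-separated argument. A standalone sketch of that behavior (helper and package names are hypothetical):

```go
package kaspactlsketch

import "strings"

// parseStringSlice mirrors the slice handling in stringToValue: comma-separated
// input becomes a []string, and an empty input becomes an empty slice rather
// than []string{""}.
func parseStringSlice(valueStr string) []string {
	if valueStr == "" {
		return []string{}
	}
	return strings.Split(valueStr, ",")
}
```

For example, parseStringSlice("addr1,addr2") yields []string{"addr1", "addr2"}.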
View File

@@ -15,6 +15,7 @@ const (
createUnsignedTransactionSubCmd = "create-unsigned-transaction"
signSubCmd = "sign"
broadcastSubCmd = "broadcast"
parseSubCmd = "parse"
showAddressesSubCmd = "show-addresses"
newAddressSubCmd = "new-address"
dumpUnencryptedDataSubCmd = "dump-unencrypted-data"
@@ -79,6 +80,13 @@ type broadcastConfig struct {
config.NetworkFlags
}
type parseConfig struct {
Transaction string `long:"transaction" short:"t" description:"The transaction to parse (encoded in hex)"`
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the transaction to parse (encoded in hex)"`
Verbose bool `long:"verbose" short:"v" description:"Verbose: show transaction inputs"`
config.NetworkFlags
}
type showAddressesConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
config.NetworkFlags
@@ -133,6 +141,10 @@ func parseCommandLine() (subCommand string, config interface{}) {
parser.AddCommand(broadcastSubCmd, "Broadcast the given transaction",
"Broadcast the given transaction", broadcastConf)
parseConf := &parseConfig{}
parser.AddCommand(parseSubCmd, "Parse the given transaction and print its contents",
"Parse the given transaction and print its contents", parseConf)
showAddressesConf := &showAddressesConfig{DaemonAddress: defaultListen}
parser.AddCommand(showAddressesSubCmd, "Shows all generated public addresses of the current wallet",
"Shows all generated public addresses of the current wallet", showAddressesConf)
@@ -207,6 +219,13 @@ func parseCommandLine() (subCommand string, config interface{}) {
printErrorAndExit(err)
}
config = broadcastConf
case parseSubCmd:
combineNetworkFlags(&parseConf.NetworkFlags, &cfg.NetworkFlags)
err := parseConf.ResolveNetwork(parser)
if err != nil {
printErrorAndExit(err)
}
config = parseConf
case showAddressesSubCmd:
combineNetworkFlags(&showAddressesConf.NetworkFlags, &cfg.NetworkFlags)
err := showAddressesConf.ResolveNetwork(parser)

View File

@@ -19,6 +19,8 @@ func main() {
err = sign(config.(*signConfig))
case broadcastSubCmd:
err = broadcast(config.(*broadcastConfig))
case parseSubCmd:
err = parse(config.(*parseConfig))
case showAddressesSubCmd:
err = showAddresses(config.(*showAddressesConfig))
case newAddressSubCmd:

cmd/kaspawallet/parse.go (new file, 83 lines added)
View File

@@ -0,0 +1,83 @@
package main
import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/pkg/errors"
"io/ioutil"
"strings"
)
func parse(conf *parseConfig) error {
if conf.Transaction == "" && conf.TransactionFile == "" {
return errors.Errorf("Either --transaction or --transaction-file is required")
}
if conf.Transaction != "" && conf.TransactionFile != "" {
return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time")
}
transactionHex := conf.Transaction
if conf.TransactionFile != "" {
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
if err != nil {
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
}
transactionHex = strings.TrimSpace(string(transactionHexBytes))
}
transaction, err := hex.DecodeString(transactionHex)
if err != nil {
return err
}
partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction)
if err != nil {
return err
}
fmt.Printf("Transaction ID: \t%s\n", consensushashing.TransactionID(partiallySignedTransaction.Tx))
fmt.Println()
allInputSompi := uint64(0)
for index, input := range partiallySignedTransaction.Tx.Inputs {
partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index]
if conf.Verbose {
fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Kaspa\n", index, input.PreviousOutpoint.TransactionID,
input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerKaspa))
}
allInputSompi += partiallySignedInput.PrevOutput.Value
}
if conf.Verbose {
fmt.Println()
}
allOutputSompi := uint64(0)
for index, output := range partiallySignedTransaction.Tx.Outputs {
scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams)
if err != nil {
return err
}
addressString := scriptPublicKeyAddress.EncodeAddress()
if scriptPublicKeyType == txscript.NonStandardTy {
scriptPublicKeyHex := hex.EncodeToString(output.ScriptPublicKey.Script)
addressString = fmt.Sprintf("<Non-standard transaction script public key: %s>", scriptPublicKeyHex)
}
fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Kaspa\n",
index, addressString, float64(output.Value)/float64(constants.SompiPerKaspa))
allOutputSompi += output.Value
}
fmt.Println()
fmt.Printf("Fee:\t%d Sompi\n", allInputSompi-allOutputSompi)
return nil
}
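Per the flags declared in parseConfig, a typical invocation would look something like `kaspawallet parse --transaction <hex>` or `kaspawallet parse --transaction-file <path> --verbose`. Amounts are printed in Kaspa (the sompi value divided by constants.SompiPerKaspa), and the reported fee is simply total input sompi minus total output sompi.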

View File

@@ -286,13 +286,15 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
blockInfo.BlueScore = ghostdagData.BlueScore()
blockInfo.BlueWork = ghostdagData.BlueWork()
blockInfo.SelectedParent = ghostdagData.SelectedParent()
blockInfo.MergeSetBlues = ghostdagData.MergeSetBlues()
blockInfo.MergeSetReds = ghostdagData.MergeSetReds()
return blockInfo, nil
}
func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
parents []*externalapi.DomainHash, selectedParent *externalapi.DomainHash,
children []*externalapi.DomainHash, err error) {
parents []*externalapi.DomainHash, children []*externalapi.DomainHash, err error) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -301,15 +303,10 @@ func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
blockRelation, err := s.blockRelationStores[0].BlockRelation(s.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
blockGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, false)
if err != nil {
return nil, nil, nil, err
}
return blockRelation.Parents, blockGHOSTDAGData.SelectedParent(), blockRelation.Children, nil
return blockRelation.Parents, blockRelation.Children, nil
}
func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
@@ -824,3 +821,16 @@ func (s *consensus) TrustedGHOSTDAGData(blockHash *externalapi.DomainHash) (*ext
return ghostdagData, nil
}
func (s *consensus) IsChainBlock(blockHash *externalapi.DomainHash) (bool, error) {
s.lock.Lock()
defer s.lock.Unlock()
stagingArea := model.NewStagingArea()
virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false)
if err != nil {
return false, err
}
return s.dagTopologyManagers[0].IsInSelectedParentChainOf(stagingArea, blockHash, virtualGHOSTDAGData.SelectedParent())
}

View File

@@ -4,18 +4,24 @@ import "math/big"
// BlockInfo contains various information about a specific block
type BlockInfo struct {
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
Exists bool
BlockStatus BlockStatus
BlueScore uint64
BlueWork *big.Int
SelectedParent *DomainHash
MergeSetBlues []*DomainHash
MergeSetReds []*DomainHash
}
// Clone returns a clone of BlockInfo
func (bi *BlockInfo) Clone() *BlockInfo {
return &BlockInfo{
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
Exists: bi.Exists,
BlockStatus: bi.BlockStatus.Clone(),
BlueScore: bi.BlueScore,
BlueWork: new(big.Int).Set(bi.BlueWork),
SelectedParent: bi.SelectedParent,
MergeSetBlues: CloneHashes(bi.MergeSetBlues),
MergeSetReds: CloneHashes(bi.MergeSetReds),
}
}

View File

@@ -14,31 +14,83 @@ func initTestBlockInfoStructsForClone() []*BlockInfo {
BlockStatus(0x01),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x02),
0,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
1,
1,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
255,
2,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
0,
3,
big.NewInt(0),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
true,
BlockStatus(0x01),
0,
big.NewInt(1),
nil,
[]*DomainHash{},
[]*DomainHash{},
}, {
false,
BlockStatus(0x01),
0,
big.NewInt(1),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}),
},
[]*DomainHash{
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}),
NewDomainHashFromByteArray(&[DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}),
},
},
}
return tests

View File

@@ -16,7 +16,7 @@ type Consensus interface {
GetBlockEvenIfHeaderOnly(blockHash *DomainHash) (*DomainBlock, error)
GetBlockHeader(blockHash *DomainHash) (BlockHeader, error)
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, selectedParent *DomainHash, children []*DomainHash, err error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, children []*DomainHash, err error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)
GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)
@@ -50,4 +50,5 @@ type Consensus interface {
TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *DomainHash, daaBlockWindowIndex uint64) (*TrustedDataDataDAAHeader, error)
TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *DomainHash) ([]*DomainHash, error)
TrustedGHOSTDAGData(blockHash *DomainHash) (*BlockGHOSTDAGData, error)
IsChainBlock(blockHash *DomainHash) (bool, error)
}
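Since GetBlockRelations no longer returns the selected parent, callers pick it up from GetBlockInfo instead, as PopulateBlockWithVerboseData now does. A hedged caller-side migration sketch (helper and package names are hypothetical; the externalapi import path is the one used throughout the repo):

```go
package consensussketch

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// selectedParentOf shows the caller-side migration: the selected parent now comes
// from BlockInfo rather than GetBlockRelations. It is nil for the genesis block.
func selectedParentOf(c externalapi.Consensus, hash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
	blockInfo, err := c.GetBlockInfo(hash)
	if err != nil {
		return nil, err
	}
	return blockInfo.SelectedParent, nil
}
```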

View File

@@ -995,7 +995,13 @@ func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingA
return nil, err
}
if hasPruningPointInItsSelectedChain && pm.finalityScore(ghostdagData.BlueScore()) > pm.finalityScore(selectedParentPruningPointHeader.BlueScore()+pm.pruningDepth) {
// Note: from the POV of the current block, the pruning point is the first block in its chain that is at depth pm.pruningDepth and
// whose finality score is greater than that of the previous pruning point. This is why, if the difference between
// (finalityScore(selectedParentPruningPoint.blueScore) + 1) * finalityInterval and the current block's blue score is less than
// pm.pruningDepth, we know for sure that this block didn't trigger a pruning point change.
minRequiredBlueScoreForNextPruningPoint := (pm.finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * pm.finalityInterval
if hasPruningPointInItsSelectedChain &&
minRequiredBlueScoreForNextPruningPoint+pm.pruningDepth <= ghostdagData.BlueScore() {
var suggestedLowHash *externalapi.DomainHash
hasReachabilityData, err := pm.reachabilityDataStore.HasReachabilityData(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
if err != nil {

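The rewritten condition is pure integer arithmetic over blue scores. A minimal sketch of the check, assuming finalityScore(b) = b / finalityInterval with integer division (as the surrounding code implies); the real code additionally requires hasPruningPointInItsSelectedChain, and all names here are illustrative:

```go
package pruningsketch

// shouldCheckPruningPointMove restates the new heuristic: only when the current
// block's blue score is at least pruningDepth above the first blue score of the
// next finality window can it possibly move the pruning point.
func shouldCheckPruningPointMove(blueScore, selectedParentPruningPointBlueScore,
	finalityInterval, pruningDepth uint64) bool {

	finalityScore := func(b uint64) uint64 { return b / finalityInterval }
	minRequiredBlueScoreForNextPruningPoint :=
		(finalityScore(selectedParentPruningPointBlueScore) + 1) * finalityInterval
	return minRequiredBlueScoreForNextPruningPoint+pruningDepth <= blueScore
}
```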
View File

@@ -12,7 +12,7 @@ import (
const (
// DefaultMaxMessages is the default capacity for a route with a capacity defined
DefaultMaxMessages = 100
DefaultMaxMessages = 1000
)
var (

View File

@@ -1,12 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: messages.proto
package protowire
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -20,6 +21,10 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type KaspadMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache

View File

@@ -11,8 +11,7 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const _ = grpc.SupportPackageIsVersion6
// P2PClient is the client API for P2P service.
//
@@ -30,7 +29,7 @@ func NewP2PClient(cc grpc.ClientConnInterface) P2PClient {
}
func (c *p2PClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (P2P_MessageStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &P2P_ServiceDesc.Streams[0], "/protowire.P2P/MessageStream", opts...)
stream, err := c.cc.NewStream(ctx, &_P2P_serviceDesc.Streams[0], "/protowire.P2P/MessageStream", opts...)
if err != nil {
return nil, err
}
@@ -72,20 +71,13 @@ type P2PServer interface {
type UnimplementedP2PServer struct {
}
func (UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
func (*UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}
func (*UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}
// UnsafeP2PServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to P2PServer will
// result in compilation errors.
type UnsafeP2PServer interface {
mustEmbedUnimplementedP2PServer()
}
func RegisterP2PServer(s grpc.ServiceRegistrar, srv P2PServer) {
s.RegisterService(&P2P_ServiceDesc, srv)
func RegisterP2PServer(s *grpc.Server, srv P2PServer) {
s.RegisterService(&_P2P_serviceDesc, srv)
}
func _P2P_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -114,10 +106,7 @@ func (x *p2PMessageStreamServer) Recv() (*KaspadMessage, error) {
return m, nil
}
// P2P_ServiceDesc is the grpc.ServiceDesc for P2P service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var P2P_ServiceDesc = grpc.ServiceDesc{
var _P2P_serviceDesc = grpc.ServiceDesc{
ServiceName: "protowire.P2P",
HandlerType: (*P2PServer)(nil),
Methods: []grpc.MethodDesc{},
@@ -148,7 +137,7 @@ func NewRPCClient(cc grpc.ClientConnInterface) RPCClient {
}
func (c *rPCClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (RPC_MessageStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &RPC_ServiceDesc.Streams[0], "/protowire.RPC/MessageStream", opts...)
stream, err := c.cc.NewStream(ctx, &_RPC_serviceDesc.Streams[0], "/protowire.RPC/MessageStream", opts...)
if err != nil {
return nil, err
}
@@ -190,20 +179,13 @@ type RPCServer interface {
type UnimplementedRPCServer struct {
}
func (UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
func (*UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}
func (*UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}
// UnsafeRPCServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to RPCServer will
// result in compilation errors.
type UnsafeRPCServer interface {
mustEmbedUnimplementedRPCServer()
}
func RegisterRPCServer(s grpc.ServiceRegistrar, srv RPCServer) {
s.RegisterService(&RPC_ServiceDesc, srv)
func RegisterRPCServer(s *grpc.Server, srv RPCServer) {
s.RegisterService(&_RPC_serviceDesc, srv)
}
func _RPC_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -232,10 +214,7 @@ func (x *rPCMessageStreamServer) Recv() (*KaspadMessage, error) {
return m, nil
}
// RPC_ServiceDesc is the grpc.ServiceDesc for RPC service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var RPC_ServiceDesc = grpc.ServiceDesc{
var _RPC_serviceDesc = grpc.ServiceDesc{
ServiceName: "protowire.RPC",
HandlerType: (*RPCServer)(nil),
Methods: []grpc.MethodDesc{},

View File

@@ -1,12 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: p2p.proto
package protowire
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -20,6 +21,10 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type RequestAddressesMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache

View File

@@ -79,6 +79,9 @@
- [GetUtxosByAddressesResponseMessage](#protowire.GetUtxosByAddressesResponseMessage)
- [GetBalanceByAddressRequestMessage](#protowire.GetBalanceByAddressRequestMessage)
- [GetBalanceByAddressResponseMessage](#protowire.GetBalanceByAddressResponseMessage)
- [GetBalancesByAddressesRequestMessage](#protowire.GetBalancesByAddressesRequestMessage)
- [BalancesByAddressEntry](#protowire.BalancesByAddressEntry)
- [GetBalancesByAddressesResponseMessage](#protowire.GetBalancesByAddressesResponseMessage)
- [GetVirtualSelectedParentBlueScoreRequestMessage](#protowire.GetVirtualSelectedParentBlueScoreRequestMessage)
- [GetVirtualSelectedParentBlueScoreResponseMessage](#protowire.GetVirtualSelectedParentBlueScoreResponseMessage)
- [NotifyVirtualSelectedParentBlueScoreChangedRequestMessage](#protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage)
@@ -211,6 +214,9 @@ Receivers of any ResponseMessage are expected to check whether its error field i
| isHeaderOnly | [bool](#bool) | | |
| blueScore | [uint64](#uint64) | | |
| childrenHashes | [string](#string) | repeated | |
| mergeSetBluesHashes | [string](#string) | repeated | |
| mergeSetRedsHashes | [string](#string) | repeated | |
| isChainBlock | [bool](#bool) | | |
@@ -410,6 +416,7 @@ See: GetBlockTemplateRequestMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| block | [RpcBlock](#protowire.RpcBlock) | | |
| allowNonDAABlocks | [bool](#bool) | | |
@@ -1328,6 +1335,54 @@ This call is only available when this kaspad was started with `--utxoindex`
<a name="protowire.GetBalancesByAddressesRequestMessage"></a>
### GetBalancesByAddressesRequestMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| addresses | [string](#string) | repeated | |
<a name="protowire.BalancesByAddressEntry"></a>
### BalancesByAddressEntry
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| address | [string](#string) | | |
| balance | [uint64](#uint64) | | |
| error | [RPCError](#protowire.RPCError) | | |
<a name="protowire.GetBalancesByAddressesResponseMessage"></a>
### GetBalancesByAddressesResponseMessage
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| entries | [BalancesByAddressEntry](#protowire.BalancesByAddressEntry) | repeated | |
| error | [RPCError](#protowire.RPCError) | | |
<a name="protowire.GetVirtualSelectedParentBlueScoreRequestMessage"></a>
### GetVirtualSelectedParentBlueScoreRequestMessage

View File

@@ -52,6 +52,9 @@ message RpcBlockVerboseData{
bool isHeaderOnly = 15;
uint64 blueScore = 16;
repeated string childrenHashes = 17;
repeated string mergeSetBluesHashes = 18;
repeated string mergeSetRedsHashes = 19;
bool isChainBlock = 20;
}
message RpcTransaction {

View File

@@ -193,24 +193,30 @@ func (x *RpcBlockVerboseData) toAppMessage() (*appmessage.RPCBlockVerboseData, e
return nil, errors.Wrapf(errorNil, "RpcBlockVerboseData is nil")
}
return &appmessage.RPCBlockVerboseData{
Hash: x.Hash,
Difficulty: x.Difficulty,
SelectedParentHash: x.SelectedParentHash,
TransactionIDs: x.TransactionIds,
IsHeaderOnly: x.IsHeaderOnly,
BlueScore: x.BlueScore,
ChildrenHashes: x.ChildrenHashes,
Hash: x.Hash,
Difficulty: x.Difficulty,
SelectedParentHash: x.SelectedParentHash,
TransactionIDs: x.TransactionIds,
IsHeaderOnly: x.IsHeaderOnly,
BlueScore: x.BlueScore,
ChildrenHashes: x.ChildrenHashes,
MergeSetBluesHashes: x.MergeSetBluesHashes,
MergeSetRedsHashes: x.MergeSetRedsHashes,
IsChainBlock: x.IsChainBlock,
}, nil
}
func (x *RpcBlockVerboseData) fromAppMessage(message *appmessage.RPCBlockVerboseData) {
*x = RpcBlockVerboseData{
Hash: message.Hash,
Difficulty: message.Difficulty,
SelectedParentHash: message.SelectedParentHash,
TransactionIds: message.TransactionIDs,
IsHeaderOnly: message.IsHeaderOnly,
BlueScore: message.BlueScore,
ChildrenHashes: message.ChildrenHashes,
Hash: message.Hash,
Difficulty: message.Difficulty,
SelectedParentHash: message.SelectedParentHash,
TransactionIds: message.TransactionIDs,
IsHeaderOnly: message.IsHeaderOnly,
BlueScore: message.BlueScore,
ChildrenHashes: message.ChildrenHashes,
MergeSetBluesHashes: message.MergeSetBluesHashes,
MergeSetRedsHashes: message.MergeSetRedsHashes,
IsChainBlock: message.IsChainBlock,
}
}