Compare commits

...

10 Commits

Author SHA1 Message Date
Ori Newman
52be8b622a Add cache to BuildPruningPointProof 2021-11-20 14:11:12 +02:00
Ori Newman
72e7d528a9 Increase race detector timeout 2021-11-20 13:40:19 +02:00
Ori Newman
e2bd2778ff Fix a bug in BuildPruningPointProof 2021-11-20 13:39:22 +02:00
Ori Newman
126e14ac29 Get rid of ParentsAtLevel header method 2021-11-20 12:25:11 +02:00
Ori Newman
1121054d11 Fix NumThreads bug in the wallet 2021-11-20 11:36:10 +02:00
Ori Newman
08121207dc Add hardForkOmitGenesisFromParentsDAAScore logic 2021-11-20 01:18:40 +02:00
Ori Newman
a3acb23215 Fix IBD indication on submit block 2021-11-19 18:03:31 +02:00
Ori Newman
35e76020d2 Cache block level in the header 2021-11-19 17:51:53 +02:00
Ori Newman
81713c2ec6 Cache existence in reachability store 2021-11-19 17:43:03 +02:00
Michael Sutton
b8d36a1772
Modify DefaultTimeout to 120 seconds
A temporary workaround for nodes having trouble syncing (currently the download of pruning-point-related data during IBD takes more than 30 seconds)
2021-11-19 12:28:39 +02:00
29 changed files with 358 additions and 73 deletions

View File

@@ -46,4 +46,4 @@ jobs:
       run: |
         git checkout "${{ env.run_on }}"
         git status
-        go test -race ./...
+        go test -timeout 20m -race ./...

View File

@@ -8,7 +8,7 @@ import (
 // DefaultTimeout is the default duration to wait for enqueuing/dequeuing
 // to/from routes.
-const DefaultTimeout = 30 * time.Second
+const DefaultTimeout = 120 * time.Second

 // ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
 var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

View File

@@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte
 		return err
 	}
-	log.Debugf("Headers downloaded from peer %s", flow.peer)
+	log.Infof("Headers downloaded from peer %s", flow.peer)

 	highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
 	if err != nil {

View File

@@ -14,9 +14,14 @@ import (
 func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
 	submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)
-	if context.ProtocolManager.IsIBDRunning() {
+	isSynced, err := context.ProtocolManager.ShouldMine()
+	if err != nil {
+		return nil, err
+	}
+
+	if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
 		return &appmessage.SubmitBlockResponseMessage{
-			Error:        appmessage.RPCErrorf("Block not submitted - IBD is running"),
+			Error:        appmessage.RPCErrorf("Block not submitted - node is not synced"),
 			RejectReason: appmessage.RejectReasonIsInIBD,
 		}, nil
 	}
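The effect of the change is easiest to see as a truth table: a block is now rejected only when the node considers itself unsynced and the operator has not opted out of the check. A condensed, hypothetical sketch of the gating rule (isSynced stands in for the result of the new ProtocolManager.ShouldMine call, whose internals are not part of this diff; note the RejectReason stays RejectReasonIsInIBD for backward compatibility even though the message text changed):

package main

import "fmt"

// Hypothetical condensation of the check above; not the actual kaspad source.
func shouldRejectSubmitBlock(allowWhenNotSynced, isSynced bool) bool {
	return !allowWhenNotSynced && !isSynced
}

func main() {
	fmt.Println(shouldRejectSubmitBlock(false, false)) // true: rejected, node not synced
	fmt.Println(shouldRejectSubmitBlock(true, false))  // false: testing override in effect
	fmt.Println(shouldRejectSubmitBlock(false, true))  // false: node is synced
}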

View File

@@ -56,6 +56,7 @@ func create(conf *createConfig) error {
 	}

 	file := keys.File{
+		Version:            keys.LastVersion,
 		EncryptedMnemonics: encryptedMnemonics,
 		ExtendedPublicKeys: extendedPublicKeys,
 		MinimumSignatures:  conf.MinimumSignatures,

View File

@ -10,6 +10,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings"
"github.com/kaspanet/kaspad/domain/dagconfig" "github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util" "github.com/kaspanet/kaspad/util"
@ -22,6 +23,8 @@ var (
defaultAppDir = util.AppDir("kaspawallet", false) defaultAppDir = util.AppDir("kaspawallet", false)
) )
const LastVersion = 1
func defaultKeysFile(netParams *dagconfig.Params) string { func defaultKeysFile(netParams *dagconfig.Params) string {
return filepath.Join(defaultAppDir, netParams.Name, "keys.json") return filepath.Join(defaultAppDir, netParams.Name, "keys.json")
} }
@ -32,6 +35,8 @@ type encryptedPrivateKeyJSON struct {
} }
type keysFileJSON struct { type keysFileJSON struct {
Version uint32 `json:"version"`
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different than 0
EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"` EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
ExtendedPublicKeys []string `json:"publicKeys"` ExtendedPublicKeys []string `json:"publicKeys"`
MinimumSignatures uint32 `json:"minimumSignatures"` MinimumSignatures uint32 `json:"minimumSignatures"`
@ -49,6 +54,8 @@ type EncryptedMnemonic struct {
// File holds all the data related to the wallet keys // File holds all the data related to the wallet keys
type File struct { type File struct {
Version uint32
NumThreads uint8 // This field is ignored for versions different than 0
EncryptedMnemonics []*EncryptedMnemonic EncryptedMnemonics []*EncryptedMnemonic
ExtendedPublicKeys []string ExtendedPublicKeys []string
MinimumSignatures uint32 MinimumSignatures uint32
@ -69,6 +76,8 @@ func (d *File) toJSON() *keysFileJSON {
} }
return &keysFileJSON{ return &keysFileJSON{
Version: d.Version,
NumThreads: d.NumThreads,
EncryptedPrivateKeys: encryptedPrivateKeysJSON, EncryptedPrivateKeys: encryptedPrivateKeysJSON,
ExtendedPublicKeys: d.ExtendedPublicKeys, ExtendedPublicKeys: d.ExtendedPublicKeys,
MinimumSignatures: d.MinimumSignatures, MinimumSignatures: d.MinimumSignatures,
@ -80,6 +89,8 @@ func (d *File) toJSON() *keysFileJSON {
} }
func (d *File) fromJSON(fileJSON *keysFileJSON) error { func (d *File) fromJSON(fileJSON *keysFileJSON) error {
d.Version = fileJSON.Version
d.NumThreads = fileJSON.NumThreads
d.MinimumSignatures = fileJSON.MinimumSignatures d.MinimumSignatures = fileJSON.MinimumSignatures
d.ECDSA = fileJSON.ECDSA d.ECDSA = fileJSON.ECDSA
d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys
@ -181,10 +192,20 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
if len(password) == 0 { if len(password) == 0 {
password = getPassword("Password:") password = getPassword("Password:")
} }
var numThreads uint8
if len(d.EncryptedMnemonics) > 0 {
var err error
numThreads, err = d.numThreads(password)
if err != nil {
return nil, err
}
}
privateKeys := make([]string, len(d.EncryptedMnemonics)) privateKeys := make([]string, len(d.EncryptedMnemonics))
for i, encryptedPrivateKey := range d.EncryptedMnemonics { for i, encryptedPrivateKey := range d.EncryptedMnemonics {
var err error var err error
privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password) privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -278,13 +299,64 @@ func (d *File) Save() error {
return nil return nil
} }
func getAEAD(password, salt []byte) (cipher.AEAD, error) { func (d *File) numThreads(password []byte) (uint8, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32) if d.Version != 0 {
return 8, nil
}
if d.NumThreads != 0 {
return d.NumThreads, nil
}
numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt)
if err != nil {
return 0, err
}
d.NumThreads = numThreads
err = d.Save()
if err != nil {
return 0, err
}
return numThreads, nil
}
func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
numCPU := uint8(runtime.NumCPU())
_, err := getAEAD(numCPU, password, salt)
if err != nil {
if !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return numCPU, nil
}
for i := uint8(1); ; i++ {
if i == numCPU {
continue
}
_, err := getAEAD(i, password, salt)
if err != nil {
const maxTries = 32
if i == maxTries || !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return i, nil
}
}
}
func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
return chacha20poly1305.NewX(key) return chacha20poly1305.NewX(key)
} }
func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) { func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(password, encryptedPrivateKey.salt) aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt)
if err != nil { if err != nil {
return "", err return "", err
} }
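The detection loop works because Argon2id's parallelism parameter is part of the key derivation itself: deriving with the wrong thread count produces a different key, so XChaCha20-Poly1305 decryption fails with "message authentication failed", which the code treats as a signal to try the next count. A minimal, self-contained sketch of that property (illustrative password and salt, not the wallet's actual password handling):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	password := []byte("correct horse battery staple")
	salt := []byte("0123456789abcdef")

	// Same password and salt, different thread counts: the derived keys
	// differ, so a wallet encrypted with runtime.NumCPU() threads on one
	// machine cannot be decrypted by blindly using NumCPU() on another.
	key4 := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
	key8 := argon2.IDKey(password, salt, 1, 64*1024, 8, 32)

	fmt.Println("keys equal:", bytes.Equal(key4, key8)) // false
}

This also explains the new Version field: for files with version >= 1 the wallet assumes a fixed count of 8 threads, so the trial-decryption scan is a one-time migration cost for legacy version-0 files, and its result is then persisted via Save().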

View File

@@ -716,7 +716,10 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
 func (s *consensus) ResolveVirtual() error {
 	// In order to prevent a situation that the consensus lock is held for too much time, we
 	// release the lock each time resolve 100 blocks.
-	for {
+	for i := 0; ; i++ {
+		if i%10 == 0 {
+			log.Infof("Resolving virtual. This may take some time...")
+		}
 		var isCompletelyResolved bool
 		var err error
 		func() {
@@ -730,6 +733,7 @@ func (s *consensus) ResolveVirtual() error {
 		}

 		if isCompletelyResolved {
+			log.Infof("Resolved virtual")
 			return nil
 		}
 	}

View File

@ -6,7 +6,9 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging" "github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
) )
var reachabilityDataBucketName = []byte("reachability-data") var reachabilityDataBucketName = []byte("reachability-data")
@ -50,6 +52,8 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool
return rds.stagingShard(stagingArea).isStaged() return rds.stagingShard(stagingArea).isStaged()
} }
var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
// ReachabilityData returns the reachabilityData associated with the given blockHash // ReachabilityData returns the reachabilityData associated with the given blockHash
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) { func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
stagingShard := rds.stagingShard(stagingArea) stagingShard := rds.stagingShard(stagingArea)
@ -59,10 +63,16 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
} }
if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok { if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
if reachabilityData == nil {
return nil, errNotFound
}
return reachabilityData.(model.ReachabilityData), nil return reachabilityData.(model.ReachabilityData), nil
} }
reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash)) reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
if database.IsNotFoundError(err) {
rds.reachabilityDataCache.Add(blockHash, nil)
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -76,19 +86,17 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
} }
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
stagingShard := rds.stagingShard(stagingArea) _, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
if database.IsNotFoundError(err) {
if _, ok := stagingShard.reachabilityData[*blockHash]; ok { return false, nil
return true, nil }
if err != nil {
return false, err
} }
if rds.reachabilityDataCache.Has(blockHash) {
return true, nil return true, nil
} }
return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
}
// ReachabilityReindexRoot returns the current reachability reindex root // ReachabilityReindexRoot returns the current reachability reindex root
func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
stagingShard := rds.stagingShard(stagingArea) stagingShard := rds.stagingShard(stagingArea)
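The subtle point here is the nil sentinel: adding nil under the block hash records "known to be absent", which is distinct from the hash simply not being in the cache. A toy illustration of that distinction, using a plain map instead of the real lrucache.LRUCache:

package main

import "fmt"

func main() {
	cache := map[string]interface{}{}

	cache["missing-block"] = nil // cached miss: remembered as absent

	v, ok := cache["missing-block"]
	fmt.Println(ok, v == nil) // true true -> known absent, skip the DB

	_, ok = cache["never-seen"]
	fmt.Println(ok) // false -> must fall through to the DB
}

With misses cached, HasReachabilityData becomes a thin wrapper around ReachabilityData, which is exactly what the hunk above does; together with the cache-size bump from 200 to 86400 entries in the factory diff below, this is the "Cache existence in reachability store" commit.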

View File

@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore" "github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder" "github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager" "github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants" "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil" "io/ioutil"
@ -158,12 +159,17 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0] dagTraversalManager := dagTraversalManagers[0]
// Processes // Processes
parentsManager := parentssanager.New(config.GenesisHash, config.HardForkOmitGenesisFromParentsDAAScore)
blockParentBuilder := blockparentbuilder.New( blockParentBuilder := blockparentbuilder.New(
dbManager, dbManager,
blockHeaderStore, blockHeaderStore,
dagTopologyManager, dagTopologyManager,
parentsManager,
reachabilityDataStore, reachabilityDataStore,
pruningStore, pruningStore,
config.HardForkOmitGenesisFromParentsDAAScore,
config.GenesisHash,
) )
pastMedianTimeManager := f.pastMedianTimeConsructor( pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance, config.TimestampDeviationTolerance,
@ -316,6 +322,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityManager, finalityManager,
blockParentBuilder, blockParentBuilder,
pruningManager, pruningManager,
parentsManager,
pruningStore, pruningStore,
blockStore, blockStore,
@ -403,6 +410,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
ghostdagManagers, ghostdagManagers,
reachabilityManagers, reachabilityManagers,
dagTraversalManagers, dagTraversalManagers,
parentsManager,
ghostdagDataStores, ghostdagDataStores,
pruningStore, pruningStore,
@ -581,7 +589,7 @@ func dagStores(config *Config,
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches) ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
} else { } else {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false) blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false) reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false)
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false) ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false)
} }
} }

View File

@@ -58,7 +58,6 @@ type BlockHeader interface {
 type BaseBlockHeader interface {
 	Version() uint16
 	Parents() []BlockLevelParents
-	ParentsAtLevel(level int) BlockLevelParents
 	DirectParents() BlockLevelParents
 	HashMerkleRoot() *DomainHash
 	AcceptedIDMerkleRoot() *DomainHash
@@ -70,6 +69,7 @@ type BaseBlockHeader interface {
 	BlueScore() uint64
 	BlueWork() *big.Int
 	PruningPoint() *DomainHash
+	BlockLevel() int
 	Equal(other BaseBlockHeader) bool
 }

View File

@@ -5,5 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

 // BlockParentBuilder exposes a method to build super-block parents for
 // a given set of direct parents
 type BlockParentBuilder interface {
-	BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
+	BuildParents(stagingArea *StagingArea,
+		daaScore uint64,
+		directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
 }

View File

@@ -0,0 +1,8 @@
+package model
+
+import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
+type ParentsManager interface {
+	ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
+	Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents
+}

View File

@@ -183,10 +183,16 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr
 func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction,
 	newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) {

-	parents, err := bb.newBlockParents(stagingArea)
+	daaScore, err := bb.newBlockDAAScore(stagingArea)
 	if err != nil {
 		return nil, err
 	}
+
+	parents, err := bb.newBlockParents(stagingArea, daaScore)
+	if err != nil {
+		return nil, err
+	}
+
 	timeInMilliseconds, err := bb.newBlockTime(stagingArea)
 	if err != nil {
 		return nil, err
@@ -204,10 +210,6 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
 	if err != nil {
 		return nil, err
 	}
-	daaScore, err := bb.newBlockDAAScore(stagingArea)
-	if err != nil {
-		return nil, err
-	}
 	blueWork, err := bb.newBlockBlueWork(stagingArea)
 	if err != nil {
 		return nil, err
@@ -233,12 +235,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
 	), nil
 }

-func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) {
+func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) {
 	virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash)
 	if err != nil {
 		return nil, err
 	}

-	return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
+	return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents)
 }

 func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {

View File

@@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
 		return nil, err
 	}

-	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
+	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -5,7 +5,6 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/pkg/errors"
 )

@@ -13,8 +12,12 @@ type blockParentBuilder struct {
 	databaseContext       model.DBManager
 	blockHeaderStore      model.BlockHeaderStore
 	dagTopologyManager    model.DAGTopologyManager
+	parentsManager        model.ParentsManager
 	reachabilityDataStore model.ReachabilityDataStore
 	pruningStore          model.PruningStore
+
+	hardForkOmitGenesisFromParentsDAAScore uint64
+	genesisHash                            *externalapi.DomainHash
 }

 // New creates a new instance of a BlockParentBuilder
@@ -22,20 +25,29 @@ func New(
 	databaseContext model.DBManager,
 	blockHeaderStore model.BlockHeaderStore,
 	dagTopologyManager model.DAGTopologyManager,
+	parentsManager model.ParentsManager,
 	reachabilityDataStore model.ReachabilityDataStore,
 	pruningStore model.PruningStore,
+	hardForkOmitGenesisFromParentsDAAScore uint64,
+	genesisHash *externalapi.DomainHash,
 ) model.BlockParentBuilder {
 	return &blockParentBuilder{
 		databaseContext:       databaseContext,
 		blockHeaderStore:      blockHeaderStore,
 		dagTopologyManager:    dagTopologyManager,
+		parentsManager:        parentsManager,
 		reachabilityDataStore: reachabilityDataStore,
 		pruningStore:          pruningStore,
+
+		hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore,
+		genesisHash:                            genesisHash,
 	}
 }

 func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
-	directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
+	daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {

 	// Late on we'll mutate direct parent hashes, so we first clone it.
 	directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
@@ -93,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 	// all the block levels they occupy
 	for _, directParentHeader := range directParentHeaders {
 		directParentHash := consensushashing.HeaderHash(directParentHeader)
-		blockLevel := pow.BlockLevel(directParentHeader)
+		blockLevel := directParentHeader.BlockLevel()
 		for i := 0; i <= blockLevel; i++ {
 			if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
 				candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -116,7 +128,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 	}

 	for _, directParentHeader := range directParentHeaders {
-		for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
+		for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) {
 			isEmptyLevel := false
 			if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
 				candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -145,7 +157,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 			} else {
 				for childHash, childHeader := range virtualGenesisChildrenHeaders {
 					childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
-					if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
+					if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) {
 						referenceBlocks = append(referenceBlocks, &childHash)
 					}
 				}
@@ -205,11 +217,16 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
 	parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
 	for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
+		if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; daaScore >= bpb.hardForkOmitGenesisFromParentsDAAScore && ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
+			break
+		}
+
 		levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
 		for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
 			block := block // Assign to a new pointer to avoid `range` pointer reuse
 			levelBlocks = append(levelBlocks, &block)
 		}
+
 		parents[blockLevel] = levelBlocks
 	}

 	return parents, nil

View File

@@ -6,7 +6,6 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
 )
@@ -63,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
 		return err
 	}
 	if !hasReachabilityData {
-		blockLevel := pow.BlockLevel(header)
+		blockLevel := header.BlockLevel()
 		for i := 0; i <= blockLevel; i++ {
 			err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
 			if err != nil {
@@ -195,7 +194,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
 }

 func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
-	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
+	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents())
 	if err != nil {
 		return err
 	}

View File

@@ -37,6 +37,7 @@ type blockValidator struct {
 	finalityManager    model.FinalityManager
 	blockParentBuilder model.BlockParentBuilder
 	pruningManager     model.PruningManager
+	parentsManager     model.ParentsManager
 	blockStore         model.BlockStore
 	ghostdagDataStores []model.GHOSTDAGDataStore
@@ -72,6 +73,7 @@ func New(powMax *big.Int,
 	finalityManager model.FinalityManager,
 	blockParentBuilder model.BlockParentBuilder,
 	pruningManager model.PruningManager,
+	parentsManager model.ParentsManager,
 	pruningStore model.PruningStore,
 	blockStore model.BlockStore,
@@ -108,6 +110,7 @@ func New(powMax *big.Int,
 		finalityManager:    finalityManager,
 		blockParentBuilder: blockParentBuilder,
 		pruningManager:     pruningManager,
+		parentsManager:     parentsManager,
 		pruningStore: pruningStore,
 		blockStore:   blockStore,

View File

@@ -67,9 +67,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
 	header externalapi.BlockHeader,
 	isBlockWithTrustedData bool) error {

-	for level := 0; level <= pow.BlockLevel(header); level++ {
+	for level := 0; level <= header.BlockLevel(); level++ {
 		var parents []*externalapi.DomainHash
-		for _, parent := range header.ParentsAtLevel(level) {
+		for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
 			_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
 			isNotFoundError := database.IsNotFoundError(err)
 			if !isNotFoundError && err != nil {
@@ -116,7 +116,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
 		return err
 	}

-	blockLevel := pow.BlockLevel(header)
+	blockLevel := header.BlockLevel()
 	for i := 1; i <= blockLevel; i++ {
 		err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
 		if err != nil {

View File

@@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
 	var selectedTip *externalapi.DomainHash
 	isCompletelyResolved := true
 	for _, tip := range tips {
-		log.Infof("Resolving tip %s", tip)
+		log.Debugf("Resolving tip %s", tip)
 		resolveStagingArea := model.NewStagingArea()
 		unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
 		if err != nil {
@@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
 		hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
 		if hasMoreUnverifiedThanMax {
 			resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
-			log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
+			log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
 		}

 		blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)

View File

@@ -0,0 +1,45 @@
+package parentssanager
+
+import (
+	"github.com/kaspanet/kaspad/domain/consensus/model"
+	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
+)
+
+type parentsManager struct {
+	hardForkOmitGenesisFromParentsDAAScore uint64
+	genesisHash                            *externalapi.DomainHash
+}
+
+// New instantiates a new HeadersSelectedTipManager
+func New(genesisHash *externalapi.DomainHash, hardForkOmitGenesisFromParentsDAAScore uint64) model.ParentsManager {
+	return &parentsManager{
+		genesisHash:                            genesisHash,
+		hardForkOmitGenesisFromParentsDAAScore: hardForkOmitGenesisFromParentsDAAScore,
+	}
+}
+
+func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
+	if len(blockHeader.Parents()) <= level {
+		if blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
+			return externalapi.BlockLevelParents{pm.genesisHash}
+		}
+		return externalapi.BlockLevelParents{}
+	}
+
+	return blockHeader.Parents()[level]
+}
+
+func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
+	numParents := len(blockHeader.Parents())
+	if blockHeader.DAAScore() >= pm.hardForkOmitGenesisFromParentsDAAScore {
+		numParents = constants.MaxBlockLevel + 1
+	}
+
+	parents := make([]externalapi.BlockLevelParents, numParents)
+	for i := 0; i < numParents; i++ {
+		parents[i] = pm.ParentsAtLevel(blockHeader, i)
+	}
+
+	return parents
+}
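The two methods encode the hard-fork rule: once a header's DAA score passes hardForkOmitGenesisFromParentsDAAScore, levels above the highest level actually stored in the header are treated as having genesis as their sole parent, so genesis no longer needs to be serialized into every header. A simplified, self-contained sketch of the ParentsAtLevel rule (string hashes instead of the externalapi types; the threshold value is illustrative, not a real network parameter):

package main

import "fmt"

const hardForkDAAScore = 1000 // illustrative threshold
const genesisHash = "genesis"

type header struct {
	daaScore uint64
	parents  [][]string // parents[level] = parent hashes at that block level
}

func parentsAtLevel(h *header, level int) []string {
	if len(h.parents) <= level {
		// Post-fork headers implicitly point at genesis on every level
		// they don't explicitly populate; pre-fork headers simply have
		// no parents there.
		if h.daaScore >= hardForkDAAScore {
			return []string{genesisHash}
		}
		return nil
	}
	return h.parents[level]
}

func main() {
	preFork := &header{daaScore: 500, parents: [][]string{{"a"}}}
	postFork := &header{daaScore: 2000, parents: [][]string{{"b"}}}

	fmt.Println(parentsAtLevel(preFork, 3))  // []
	fmt.Println(parentsAtLevel(postFork, 3)) // [genesis]
}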

View File

@@ -0,0 +1,5 @@
+package pruningproofmanager
+
+import "github.com/kaspanet/kaspad/infrastructure/logger"
+
+var log = logger.RegisterSubSystem("PPMN")

View File

@@ -15,8 +15,8 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"github.com/kaspanet/kaspad/infrastructure/db/database"
+	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
 	"math/big"
 )
@@ -28,6 +28,7 @@ type pruningProofManager struct {
 	ghostdagManagers     []model.GHOSTDAGManager
 	reachabilityManagers []model.ReachabilityManager
 	dagTraversalManagers []model.DAGTraversalManager
+	parentsManager       model.ParentsManager
 	ghostdagDataStores []model.GHOSTDAGDataStore
 	pruningStore       model.PruningStore
@@ -39,6 +40,9 @@ type pruningProofManager struct {
 	genesisHash   *externalapi.DomainHash
 	k             externalapi.KType
 	pruningProofM uint64
+
+	cachedPruningPoint *externalapi.DomainHash
+	cachedProof        *externalapi.PruningPointProof
 }

 // New instantiates a new PruningManager
@@ -49,6 +53,7 @@ func New(
 	ghostdagManagers []model.GHOSTDAGManager,
 	reachabilityManagers []model.ReachabilityManager,
 	dagTraversalManagers []model.DAGTraversalManager,
+	parentsManager model.ParentsManager,
 	ghostdagDataStores []model.GHOSTDAGDataStore,
 	pruningStore model.PruningStore,
@@ -68,6 +73,7 @@ func New(
 		ghostdagManagers:     ghostdagManagers,
 		reachabilityManagers: reachabilityManagers,
 		dagTraversalManagers: dagTraversalManagers,
+		parentsManager:       parentsManager,
 		ghostdagDataStores: ghostdagDataStores,
 		pruningStore:       pruningStore,
@@ -83,6 +89,33 @@ func New(
 }

 func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof")
+	defer onEnd()
+
+	pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
+	if err != nil {
+		return nil, err
+	}
+
+	if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) {
+		return ppm.cachedProof, nil
+	}
+
+	proof, err := ppm.buildPruningPointProof(stagingArea)
+	if err != nil {
+		return nil, err
+	}
+
+	ppm.cachedProof = proof
+	ppm.cachedPruningPoint = pruningPoint
+
+	return proof, nil
+}
+
+func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof")
+	defer onEnd()
+
 	pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
 	if err != nil {
 		return nil, err
@@ -97,17 +130,33 @@ func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.Stagin
 		return nil, err
 	}

-	maxLevel := len(pruningPointHeader.Parents()) - 1
+	maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
 	headersByLevel := make(map[int][]externalapi.BlockHeader)
 	selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
-	pruningPointLevel := pow.BlockLevel(pruningPointHeader)
+	pruningPointLevel := pruningPointHeader.BlockLevel()
 	for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
 		var selectedTip *externalapi.DomainHash
 		if blockLevel <= pruningPointLevel {
 			selectedTip = pruningPoint
 		} else {
-			blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel)
-			selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...)
+			blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel)
+			selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents))
+
+			// In a pruned node, some pruning point parents might be missing, but we're guaranteed that its
+			// selected parent is not missing.
+			for _, parent := range blockLevelParents {
+				_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
+				if database.IsNotFoundError(err) {
+					continue
+				}
+				if err != nil {
+					return nil, err
+				}
+
+				selectedTipCandidates = append(selectedTipCandidates, parent)
+			}
+
+			selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...)
 			if err != nil {
 				return nil, err
 			}
@@ -248,6 +297,9 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho
 }

 func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof")
+	defer onEnd()
+
 	stagingArea := model.NewStagingArea()

 	if len(pruningPointProof.Headers) == 0 {
@@ -257,8 +309,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 	level0Headers := pruningPointProof.Headers[0]
 	pruningPointHeader := level0Headers[len(level0Headers)-1]
 	pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
-	pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader)
-	maxLevel := len(pruningPointHeader.Parents()) - 1
+	pruningPointBlockLevel := pruningPointHeader.BlockLevel()
+	maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
 	if maxLevel >= len(pruningPointProof.Headers) {
 		return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
 			"has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1)
@@ -300,15 +352,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 		var selectedTip *externalapi.DomainHash
 		for i, header := range headers {
 			blockHash := consensushashing.HeaderHash(header)
-			if pow.BlockLevel(header) < blockLevel {
+			if header.BlockLevel() < blockLevel {
 				return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-					"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
+					"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
 			}

 			blockHeaderStore.Stage(stagingArea, blockHash, header)

 			var parents []*externalapi.DomainHash
-			for _, parent := range header.ParentsAtLevel(blockLevel) {
+			for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
 				_, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
 				if database.IsNotFoundError(err) {
 					continue
@@ -377,7 +429,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 			}
 		}

-		if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
+		if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
 			return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+
 				"level %d is not a parent of the pruning point", selectedTip, blockLevel)
 		}
@@ -395,7 +447,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
 				return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+
 					"proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel)
 			}
-		} else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
+		} else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
 			return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+
 				"proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+
 				"level", selectedTip, blockLevel)
@@ -554,19 +606,22 @@ func (ppm *pruningProofManager) dagProcesses(
 }

 func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
+	onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
+	defer onEnd()
+
 	for blockLevel, headers := range pruningPointProof.Headers {
 		var selectedTip *externalapi.DomainHash
 		for i, header := range headers {
 			blockHash := consensushashing.HeaderHash(header)
-			if pow.BlockLevel(header) < blockLevel {
+			if header.BlockLevel() < blockLevel {
 				return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-					"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
+					"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
 			}

 			ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)

 			var parents []*externalapi.DomainHash
-			for _, parent := range header.ParentsAtLevel(blockLevel) {
+			for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
 				_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
 				if database.IsNotFoundError(err) {
 					continue
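The cache added to BuildPruningPointProof is deliberately a single entry: the proof is evidently a pure function of the current pruning point, so one (pruning point, proof) pair is all that can ever be valid, and the entry self-invalidates the moment the pruning point advances. A runnable sketch of the pattern, with Hash and Proof standing in for externalapi.DomainHash and externalapi.PruningPointProof:

package main

import "fmt"

// Hash and Proof are illustrative stand-ins, not the real kaspad types.
type Hash string
type Proof struct{ levels int }

type proofCache struct {
	key   *Hash
	value *Proof
}

// getOrBuild serves the cached proof while the pruning point is unchanged
// and rebuilds it (an expensive operation in the real code) otherwise.
func (c *proofCache) getOrBuild(current Hash, build func() (*Proof, error)) (*Proof, error) {
	if c.key != nil && *c.key == current {
		return c.value, nil
	}
	v, err := build()
	if err != nil {
		return nil, err
	}
	c.key, c.value = &current, v
	return v, nil
}

func main() {
	cache := &proofCache{}
	builds := 0
	build := func() (*Proof, error) { builds++; return &Proof{levels: 10}, nil }

	cache.getOrBuild("pp1", build)
	cache.getOrBuild("pp1", build) // served from cache
	cache.getOrBuild("pp2", build) // pruning point moved: rebuild
	fmt.Println("builds:", builds) // 2
}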

View File

@@ -2,6 +2,7 @@ package blockheader

 import (
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
 	"math/big"
 )

@@ -18,6 +19,9 @@ type blockHeader struct {
 	blueScore            uint64
 	blueWork             *big.Int
 	pruningPoint         *externalapi.DomainHash
+
+	isBlockLevelCached bool
+	blockLevel         int
 }

 func (bh *blockHeader) BlueScore() uint64 {
@@ -41,10 +45,12 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader {
 }

 func (bh *blockHeader) SetNonce(nonce uint64) {
+	bh.isBlockLevelCached = false
 	bh.nonce = nonce
 }

 func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) {
+	bh.isBlockLevelCached = false
 	bh.timeInMilliseconds = timeInMilliseconds
 }

@@ -56,16 +62,12 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
 	return bh.parents
 }

-func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
-	if len(bh.parents) <= level {
+func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
+	if len(bh.parents) == 0 {
 		return externalapi.BlockLevelParents{}
 	}

-	return bh.parents[level]
-}
-
-func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
-	return bh.ParentsAtLevel(0)
+	return bh.parents[0]
 }

 func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
@@ -177,6 +179,15 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
 	return bh.clone()
 }

+func (bh *blockHeader) BlockLevel() int {
+	if !bh.isBlockLevelCached {
+		bh.blockLevel = pow.BlockLevel(bh)
+		bh.isBlockLevelCached = true
+	}
+
+	return bh.blockLevel
+}
+
 // NewImmutableBlockHeader returns a new immutable header
 func NewImmutableBlockHeader(
 	version uint16,
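BlockLevel is derived from the header's proof-of-work hash, so it is expensive to recompute and changes whenever a mutable field that feeds the hash changes; that is why SetNonce and SetTimeInMilliseconds reset isBlockLevelCached. A toy version of this lazy, invalidate-on-mutate pattern (computeLevel is a cheap stand-in for the real pow.BlockLevel):

package main

import "fmt"

type header struct {
	nonce       uint64
	levelCached bool
	level       int
}

// computeLevel stands in for pow.BlockLevel, which hashes the entire
// header; in the real code this is the expensive call worth caching.
func (h *header) computeLevel() int {
	return int(h.nonce % 10)
}

// BlockLevel computes the level once and serves it from the cache until
// a mutation invalidates it.
func (h *header) BlockLevel() int {
	if !h.levelCached {
		h.level = h.computeLevel()
		h.levelCached = true
	}
	return h.level
}

// SetNonce changes an input of the level computation, so it must drop
// the cached value, mirroring the diff above.
func (h *header) SetNonce(n uint64) {
	h.levelCached = false
	h.nonce = n
}

func main() {
	h := &header{}
	h.SetNonce(7)
	fmt.Println(h.BlockLevel()) // 7, computed
	fmt.Println(h.BlockLevel()) // 7, cached
	h.SetNonce(3)
	fmt.Println(h.BlockLevel()) // 3, recomputed after invalidation
}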

View File

@@ -36,6 +36,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				8,
 				big.NewInt(9),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -55,6 +57,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			headersToCompareTo: []headerToCompare{
 				{
@@ -75,6 +79,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: true,
 		},
@@ -92,6 +98,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -111,6 +119,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -128,6 +138,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -145,6 +157,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -162,6 +176,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -179,6 +195,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -196,6 +214,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -213,6 +233,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -230,6 +252,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -247,6 +271,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -264,6 +290,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				100,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -281,6 +309,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(100),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},
@@ -298,6 +328,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
 				9,
 				big.NewInt(10),
 				externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}),
+				false,
+				0,
 			},
 			expectedResult: false,
 		},

View File

@@ -186,6 +186,8 @@ type Params struct {
 	FixedSubsidySwitchPruningPointInterval uint64

 	FixedSubsidySwitchHashRateThreshold *big.Int
+
+	HardForkOmitGenesisFromParentsDAAScore uint64
 }

 // NormalizeRPCServerAddress returns addr with the current network default
@@ -264,6 +266,7 @@ var MainnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 2e6,
 }

 // TestnetParams defines the network parameters for the test Kaspa network.
@@ -326,6 +329,7 @@ var TestnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 2e6,
 }

 // SimnetParams defines the network parameters for the simulation test Kaspa
@@ -392,6 +396,7 @@ var SimnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 5,
 }

 // DevnetParams defines the network parameters for the development Kaspa network.
@@ -454,6 +459,7 @@ var DevnetParams = Params{
 	PruningProofM:                          defaultPruningProofM,
 	FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
 	FixedSubsidySwitchHashRateThreshold:    big.NewInt(150_000_000_000),
+	HardForkOmitGenesisFromParentsDAAScore: 3000,
 }

 var (

View File

@@ -121,6 +121,7 @@ type Flags struct {
 	MaxUTXOCacheSize                uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
 	UTXOIndex                       bool   `long:"utxoindex" description:"Enable the UTXO index"`
 	IsArchivalNode                  bool   `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"`
+	AllowSubmitBlockWhenNotSynced   bool   `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"`
 	EnableSanityCheckPruningUTXOSet bool   `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
 	NetworkFlags
 	ServiceOptions                  *ServiceOptions

View File

@@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
 		log.Debugf("Connecting to connection request %s", connReq.address)
 		err := c.initiateConnection(connReq.address)
 		if err != nil {
-			log.Infof("Couldn't connect to %s: %s", address, err)
+			log.Infof("Couldn't connect to requested connection %s: %s", address, err)
 			// if connection request is one try - remove from pending and ignore failure
 			if !connReq.isPermanent {
 				delete(c.pendingRequested, address)

View File

@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
err := c.initiateConnection(addressString) err := c.initiateConnection(addressString)
if err != nil { if err != nil {
log.Infof("Couldn't connect to %s: %s", addressString, err) log.Debugf("Couldn't connect to %s: %s", addressString, err)
c.addressManager.MarkConnectionFailure(netAddress) c.addressManager.MarkConnectionFailure(netAddress)
continue continue
} }

View File

@@ -36,6 +36,7 @@ func setConfig(t *testing.T, harness *appHarness) {
 	harness.config.Listeners = []string{harness.p2pAddress}
 	harness.config.RPCListeners = []string{harness.rpcAddress}
 	harness.config.UTXOIndex = harness.utxoIndex
+	harness.config.AllowSubmitBlockWhenNotSynced = true

 	if harness.overrideDAGParams != nil {
 		harness.config.ActiveNetParams = harness.overrideDAGParams