Compare commits

...

2 Commits

Author SHA1 Message Date
Ori Newman
1c18a49992 Add cache to block window (#1948)
* Add cache to block window

* Copy the window heap slice with the right capacity

* Use WindowHeapSliceStore

* Use the selected parent window as a basis (and some comments and variable renames)

* Clone slice on newSizedUpHeapFromSlice

* Rename isNotFoundError->currentIsNonTrustedBlock

* Increase windowHeapSliceStore cache size to 2000 and some cosmetic changes
2022-02-20 16:52:36 +02:00
stasatdaglabs
28d0f1ea2e Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Make MaxBlockLevel a DAG params instead of a constant.

* Change the testnet network name to 9.

* Fix TestBlockWindow.

* Set MaxBlockLevels for non-mainnet networks to 250.

* Revert "Fix TestBlockWindow."

This reverts commit 30a7892f53.

* Fix TestPruning.
2022-02-20 13:43:42 +02:00
22 changed files with 378 additions and 85 deletions

View File

@@ -0,0 +1,44 @@
package blockwindowheapslicestore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// shardKey identifies a staged window heap slice: a block hash together with
// the window size the slice was computed for.
type shardKey struct {
	hash       externalapi.DomainHash
	windowSize int
}
// blockWindowHeapSliceStagingShard is the staging shard for
// blockWindowHeapSliceStore: it accumulates heap slices in memory until
// Commit copies them into the store's cache.
type blockWindowHeapSliceStagingShard struct {
	store *blockWindowHeapSliceStore
	toAdd map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair
}
// stagingShard returns this store's shard within the given staging area,
// creating and registering an empty one on first access.
func (bss *blockWindowHeapSliceStore) stagingShard(stagingArea *model.StagingArea) *blockWindowHeapSliceStagingShard {
	return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard {
		return &blockWindowHeapSliceStagingShard{
			store: bss,
			toAdd: make(map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair),
		}
	}).(*blockWindowHeapSliceStagingShard)
}
// Commit flushes every staged heap slice into the store's in-memory cache.
// The database transaction is unused because this store is cache-only.
func (bsss *blockWindowHeapSliceStagingShard) Commit(_ model.DBTransaction) error {
	for stagedKey, stagedSlice := range bsss.toAdd {
		bsss.store.cache.Add(&stagedKey.hash, stagedKey.windowSize, stagedSlice)
	}
	return nil
}
// isStaged reports whether this shard holds any uncommitted data.
func (bsss *blockWindowHeapSliceStagingShard) isStaged() bool {
	return len(bsss.toAdd) > 0
}
// newShardKey builds a staging-shard map key from a block hash and window size.
// The hash is copied by value so the key is independent of the caller's pointer.
func newShardKey(hash *externalapi.DomainHash, windowSize int) shardKey {
	key := shardKey{windowSize: windowSize}
	key.hash = *hash
	return key
}

View File

@@ -0,0 +1,47 @@
package blockwindowheapslicestore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
// blockWindowHeapSliceStore is a cache-only implementation of
// model.WindowHeapSliceStore — committed data lives in an in-memory cache,
// and nothing is written to the database.
type blockWindowHeapSliceStore struct {
	shardID model.StagingShardID
	cache   *lrucachehashandwindowsizetoblockghostdagdatahashpairs.LRUCache
}
// New instantiates a new WindowHeapSliceStore backed by an in-memory cache
// holding up to cacheSize entries, optionally preallocated.
func New(cacheSize int, preallocate bool) model.WindowHeapSliceStore {
	store := &blockWindowHeapSliceStore{
		shardID: staging.GenerateShardingID(),
	}
	store.cache = lrucachehashandwindowsizetoblockghostdagdatahashpairs.New(cacheSize, preallocate)
	return store
}
// Stage stages the given window heap slice for the given blockHash and windowSize.
// The slice is stored as-is (not cloned), so callers must not mutate it afterwards.
func (bss *blockWindowHeapSliceStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int, heapSlice []*externalapi.BlockGHOSTDAGDataHashPair) {
	stagingShard := bss.stagingShard(stagingArea)
	stagingShard.toAdd[newShardKey(blockHash, windowSize)] = heapSlice
}
// IsStaged reports whether this store has anything staged in the given staging area.
func (bss *blockWindowHeapSliceStore) IsStaged(stagingArea *model.StagingArea) bool {
	shard := bss.stagingShard(stagingArea)
	return shard.isStaged()
}
// Get returns the heap slice staged or cached for the given block hash and
// window size, or an error wrapping database.ErrNotFound when absent.
func (bss *blockWindowHeapSliceStore) Get(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) {
	// Staged (uncommitted) data takes precedence over the committed cache.
	if staged, found := bss.stagingShard(stagingArea).toAdd[newShardKey(blockHash, windowSize)]; found {
		return staged, nil
	}
	if cached, found := bss.cache.Get(blockHash, windowSize); found {
		return cached, nil
	}
	return nil, errors.Wrap(database.ErrNotFound, "Window heap slice not found")
}

View File

@@ -1,12 +1,12 @@
package consensus
import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockwindowheapslicestore"
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
"os"
"sync"
@@ -145,9 +145,10 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches)
headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches)
daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
windowHeapSliceStore := blockwindowheapslicestore.New(2000, preallocateCaches)
blockRelationStores, reachabilityDataStores, ghostdagDataStores := dagStores(config, prefixBucket, pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches, preallocateCaches)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)
reachabilityManagers, dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, windowHeapSliceStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores)
blockRelationStore := blockRelationStores[0]
reachabilityDataStore := reachabilityDataStores[0]
@@ -158,7 +159,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]
// Processes
parentsManager := parentssanager.New(config.GenesisHash)
parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,
@@ -168,6 +169,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
pruningStore,
config.GenesisHash,
config.MaxBlockLevel,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,
@@ -304,6 +306,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
config.TimestampDeviationTolerance,
config.TargetTimePerBlock,
config.IgnoreHeaderMass,
config.MaxBlockLevel,
dbManager,
difficultyManager,
@@ -370,6 +373,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
blockProcessor := blockprocessor.New(
genesisHash,
config.TargetTimePerBlock,
config.MaxBlockLevel,
dbManager,
consensusStateManager,
pruningManager,
@@ -417,6 +421,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
genesisHash,
config.K,
config.PruningProofM,
config.MaxBlockLevel,
)
c := &consensus{
@@ -568,16 +573,16 @@ func dagStores(config *Config,
pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int,
preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) {
blockRelationStores := make([]model.BlockRelationStore, constants.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, constants.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, constants.MaxBlockLevel+1)
blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1)
reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1)
ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1)
ghostdagDataCacheSize := pruningWindowSizeForCaches * 2
if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
}
for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
prefixBucket := prefixBucket.Bucket([]byte{byte(i)})
if i == 0 {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
@@ -597,6 +602,7 @@ func (f *factory) dagProcesses(config *Config,
dbManager model.DBManager,
blockHeaderStore model.BlockHeaderStore,
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
windowHeapSliceStore model.WindowHeapSliceStore,
blockRelationStores []model.BlockRelationStore,
reachabilityDataStores []model.ReachabilityDataStore,
ghostdagDataStores []model.GHOSTDAGDataStore) (
@@ -606,12 +612,12 @@ func (f *factory) dagProcesses(config *Config,
[]model.DAGTraversalManager,
) {
reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, constants.MaxBlockLevel+1)
reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1)
dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1)
for i := 0; i <= constants.MaxBlockLevel; i++ {
for i := 0; i <= config.MaxBlockLevel; i++ {
reachabilityManagers[i] = reachabilitymanager.New(
dbManager,
ghostdagDataStores[i],
@@ -638,6 +644,7 @@ func (f *factory) dagProcesses(config *Config,
reachabilityDataStores[i],
ghostdagManagers[i],
daaWindowStore,
windowHeapSliceStore,
config.GenesisHash,
config.DifficultyAdjustmentWindowSize)
}

View File

@@ -69,7 +69,7 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
BlockLevel(maxBlockLevel int) int
Equal(other BaseBlockHeader) bool
}

View File

@@ -0,0 +1,11 @@
package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// WindowHeapSliceStore caches the slices that are needed for the heap implementation of DAGTraversalManager.BlockWindow
type WindowHeapSliceStore interface {
	Store
	// Stage stages the given heap-slice pairs for the given block hash and window size.
	Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int, pairs []*externalapi.BlockGHOSTDAGDataHashPair)
	// IsStaged returns whether this store has anything staged in the given staging area.
	IsStaged(stagingArea *StagingArea) bool
	// Get returns the pairs staged or cached for the given block hash and window
	// size, or a database.ErrNotFound error when no such entry exists.
	Get(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error)
}

View File

@@ -16,7 +16,8 @@ type blockParentBuilder struct {
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
genesisHash *externalapi.DomainHash
genesisHash *externalapi.DomainHash
maxBlockLevel int
}
// New creates a new instance of a BlockParentBuilder
@@ -30,6 +31,7 @@ func New(
pruningStore model.PruningStore,
genesisHash *externalapi.DomainHash,
maxBlockLevel int,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
@@ -40,6 +42,7 @@ func New(
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
genesisHash: genesisHash,
maxBlockLevel: maxBlockLevel,
}
}
@@ -102,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
// all the block levels they occupy
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
blockLevel := directParentHeader.BlockLevel()
blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel)
for i := 0; i <= blockLevel; i++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)

View File

@@ -14,6 +14,7 @@ import (
type blockProcessor struct {
genesisHash *externalapi.DomainHash
targetTimePerBlock time.Duration
maxBlockLevel int
databaseContext model.DBManager
blockLogger *blocklogger.BlockLogger
@@ -52,6 +53,7 @@ type blockProcessor struct {
func New(
genesisHash *externalapi.DomainHash,
targetTimePerBlock time.Duration,
maxBlockLevel int,
databaseContext model.DBManager,
consensusStateManager model.ConsensusStateManager,
@@ -86,6 +88,7 @@ func New(
return &blockProcessor{
genesisHash: genesisHash,
targetTimePerBlock: targetTimePerBlock,
maxBlockLevel: maxBlockLevel,
databaseContext: databaseContext,
blockLogger: blocklogger.NewBlockLogger(),
pruningManager: pruningManager,

View File

@@ -259,7 +259,7 @@ func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.Stagi
return err
}
headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel()
headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel(bp.maxBlockLevel)
for blockLevel := 0; blockLevel <= headersSelectedTipHeaderBlockLevel; blockLevel++ {
err := bp.reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, headersSelectedTip)
if err != nil {

View File

@@ -62,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
return err
}
if !hasReachabilityData {
blockLevel := header.BlockLevel()
blockLevel := header.BlockLevel(v.maxBlockLevel)
for i := 0; i <= blockLevel; i++ {
err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
if err != nil {

View File

@@ -23,6 +23,7 @@ type blockValidator struct {
timestampDeviationTolerance int
targetTimePerBlock time.Duration
ignoreHeaderMass bool
maxBlockLevel int
databaseContext model.DBReader
difficultyManager model.DifficultyManager
@@ -60,6 +61,7 @@ func New(powMax *big.Int,
timestampDeviationTolerance int,
targetTimePerBlock time.Duration,
ignoreHeaderMass bool,
maxBlockLevel int,
databaseContext model.DBReader,
@@ -97,6 +99,7 @@ func New(powMax *big.Int,
mergeSetSizeLimit: mergeSetSizeLimit,
maxBlockParents: maxBlockParents,
ignoreHeaderMass: ignoreHeaderMass,
maxBlockLevel: maxBlockLevel,
timestampDeviationTolerance: timestampDeviationTolerance,
targetTimePerBlock: targetTimePerBlock,

View File

@@ -69,7 +69,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
header externalapi.BlockHeader,
isBlockWithTrustedData bool) error {
for level := 0; level <= header.BlockLevel(); level++ {
for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ {
var parents []*externalapi.DomainHash
for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
@@ -118,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
return err
}
blockLevel := header.BlockLevel()
blockLevel := header.BlockLevel(v.maxBlockLevel)
for i := 1; i <= blockLevel; i++ {
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
if err != nil {

View File

@@ -152,6 +152,18 @@ func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, c
return &h
}
// newSizedUpHeapFromSlice creates a sizedUpBlockHeap seeded with a clone of
// the given heap slice. Cloning (with the source's capacity preserved) ensures
// later pushes onto this heap cannot mutate the caller's — possibly cached — slice.
func (dtm *dagTraversalManager) newSizedUpHeapFromSlice(stagingArea *model.StagingArea, slice []*externalapi.BlockGHOSTDAGDataHashPair) *sizedUpBlockHeap {
	cloned := append(make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, cap(slice)), slice...)
	return &sizedUpBlockHeap{
		impl:          upHeap{baseHeap{slice: cloned, ghostdagManager: dtm.ghostdagManager}},
		ghostdagStore: dtm.ghostdagDataStore,
		dbContext:     dtm.databaseContext,
		stagingArea:   stagingArea,
	}
}
// len returns the length of this heap
func (sbh *sizedUpBlockHeap) len() int {
return sbh.impl.Len()

View File

@@ -18,6 +18,7 @@ type dagTraversalManager struct {
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore
genesisHash *externalapi.DomainHash
difficultyAdjustmentWindowSize int
windowHeapSliceStore model.WindowHeapSliceStore
}
// New instantiates a new DAGTraversalManager
@@ -28,6 +29,7 @@ func New(
reachabilityDataStore model.ReachabilityDataStore,
ghostdagManager model.GHOSTDAGManager,
daaWindowStore model.BlocksWithTrustedDataDAAWindowStore,
windowHeapSliceStore model.WindowHeapSliceStore,
genesisHash *externalapi.DomainHash,
difficultyAdjustmentWindowSize int) model.DAGTraversalManager {
return &dagTraversalManager{
@@ -40,6 +42,7 @@ func New(
genesisHash: genesisHash,
difficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
windowHeapSliceStore: windowHeapSliceStore,
}
}

View File

@@ -16,7 +16,7 @@ func (dtm *dagTraversalManager) DAABlockWindow(stagingArea *model.StagingArea, h
func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, highHash *externalapi.DomainHash,
windowSize int) ([]*externalapi.DomainHash, error) {
windowHeap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
windowHeap, err := dtm.blockWindowHeap(stagingArea, highHash, windowSize)
if err != nil {
return nil, err
}
@@ -28,6 +28,28 @@ func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, high
return window, nil
}
// blockWindowHeap returns the block-window heap for highHash and windowSize,
// serving it from the window-heap-slice store when possible, and otherwise
// computing it and staging the result for future reuse.
func (dtm *dagTraversalManager) blockWindowHeap(stagingArea *model.StagingArea,
	highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {

	cachedSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, highHash, windowSize)
	switch {
	case err == nil:
		// Cache hit: rebuild the heap directly from the cached slice.
		return dtm.newSizedUpHeapFromSlice(stagingArea, cachedSlice), nil
	case !database.IsNotFoundError(err):
		return nil, err
	}

	computedHeap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
	if err != nil {
		return nil, err
	}
	// The virtual's window changes constantly, so it is not worth caching.
	if !highHash.Equal(model.VirtualBlockHash) {
		dtm.windowHeapSliceStore.Stage(stagingArea, highHash, windowSize, computedHeap.impl.slice)
	}
	return computedHeap, nil
}
func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.StagingArea,
highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {
@@ -45,18 +67,54 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
return nil, err
}
// If the block has a trusted DAA window attached, we just take it as is and don't use cache of selected parent to
// build the window. This is because tryPushMergeSet might not be able to find all the GHOSTDAG data that is
// associated with the block merge set.
_, err = dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
isNonTrustedBlock := database.IsNotFoundError(err)
if !isNonTrustedBlock && err != nil {
return nil, err
}
if isNonTrustedBlock && currentGHOSTDAGData.SelectedParent() != nil {
windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, currentGHOSTDAGData.SelectedParent(), windowSize)
selectedParentNotCached := database.IsNotFoundError(err)
if !selectedParentNotCached && err != nil {
return nil, err
}
if !selectedParentNotCached {
windowHeap := dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice)
if !currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
selectedParentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(
dtm.databaseContext, stagingArea, currentGHOSTDAGData.SelectedParent(), false)
if err != nil {
return nil, err
}
_, err = dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
if err != nil {
return nil, err
}
}
return windowHeap, nil
}
}
// Walk down the chain until you finish or find a trusted block, and then complete the rest
// of the window with the trusted window.
for {
if currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
break
}
_, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
isNotFoundError := database.IsNotFoundError(err)
if !isNotFoundError && err != nil {
currentIsNonTrustedBlock := database.IsNotFoundError(err)
if !currentIsNonTrustedBlock && err != nil {
return nil, err
}
if !isNotFoundError {
if !currentIsNonTrustedBlock {
for i := uint64(0); ; i++ {
daaBlock, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, i)
if database.IsNotFoundError(err) {
@@ -83,47 +141,60 @@ func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.Stag
if err != nil {
return nil, err
}
added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
done, err := dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
if err != nil {
return nil, err
}
// If the window is full and the selected parent is less than the minimum then we break
// because this means that there cannot be any more blocks in the past with higher blueWork
if !added {
if done {
break
}
// Now we go over the merge set.
// Remove the SP from the blue merge set because we already added it.
mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
// Go over the merge set in reverse because it's ordered in reverse by blueWork.
for i := len(mergeSetBlues) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetBlues[i])
if err != nil {
return nil, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
mergeSetReds := currentGHOSTDAGData.MergeSetReds()
for i := len(mergeSetReds) - 1; i >= 0; i-- {
added, err := windowHeap.tryPush(mergeSetReds[i])
if err != nil {
return nil, err
}
// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
if !added {
break
}
}
current = currentGHOSTDAGData.SelectedParent()
currentGHOSTDAGData = selectedParentGHOSTDAGData
}
return windowHeap, nil
}
// tryPushMergeSet pushes the current block's selected parent and merge set
// (blues first, then reds) onto windowHeap.
//
// It returns done=true when the selected parent itself could not be added:
// the window is full and the selected parent's blueWork is below the heap's
// minimum, which means no block further in the past can have higher blueWork —
// so the caller can stop walking down the selected-parent chain.
func (dtm *dagTraversalManager) tryPushMergeSet(windowHeap *sizedUpBlockHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData *externalapi.BlockGHOSTDAGData) (bool, error) {
	added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
	if err != nil {
		return false, err
	}
	// If the window is full and the selected parent is less than the minimum then we break
	// because this means that there cannot be any more blocks in the past with higher blueWork
	if !added {
		return true, nil
	}

	// pushReversed pushes the given hashes in reverse order (they are ordered
	// in reverse by blueWork) and stops early once a push fails, because the
	// remaining hashes are even smaller.
	pushReversed := func(hashes []*externalapi.DomainHash) error {
		for i := len(hashes) - 1; i >= 0; i-- {
			added, err := windowHeap.tryPush(hashes[i])
			if err != nil {
				return err
			}
			if !added {
				break
			}
		}
		return nil
	}

	// Now we go over the merge set.
	// Remove the SP from the blue merge set because we already added it.
	if err := pushReversed(currentGHOSTDAGData.MergeSetBlues()[1:]); err != nil {
		return false, err
	}
	if err := pushReversed(currentGHOSTDAGData.MergeSetReds()); err != nil {
		return false, err
	}
	return false, nil
}

View File

@@ -3,17 +3,18 @@ package parentssanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
type parentsManager struct {
genesisHash *externalapi.DomainHash
genesisHash *externalapi.DomainHash
maxBlockLevel int
}
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
genesisHash: genesisHash,
maxBlockLevel: maxBlockLevel,
}
}
@@ -31,7 +32,7 @@ func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, le
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
numParents := constants.MaxBlockLevel + 1
numParents := pm.maxBlockLevel + 1
parents := make([]externalapi.BlockLevelParents, numParents)
for i := 0; i < numParents; i++ {
parents[i] = pm.ParentsAtLevel(blockHeader, i)

View File

@@ -38,8 +38,8 @@ func TestPruning(t *testing.T) {
"dag-for-test-pruning.json": {
dagconfig.MainnetParams.Name: "503",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "503",
dagconfig.SimnetParams.Name: "502",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "503",
},
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/processes/reachabilitymanager"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
@@ -41,6 +40,7 @@ type pruningProofManager struct {
genesisHash *externalapi.DomainHash
k externalapi.KType
pruningProofM uint64
maxBlockLevel int
cachedPruningPoint *externalapi.DomainHash
cachedProof *externalapi.PruningPointProof
@@ -66,6 +66,7 @@ func New(
genesisHash *externalapi.DomainHash,
k externalapi.KType,
pruningProofM uint64,
maxBlockLevel int,
) model.PruningProofManager {
return &pruningProofManager{
@@ -86,6 +87,7 @@ func New(
genesisHash: genesisHash,
k: k,
pruningProofM: pruningProofM,
maxBlockLevel: maxBlockLevel,
}
}
@@ -134,7 +136,7 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
headersByLevel := make(map[int][]externalapi.BlockHeader)
selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
pruningPointLevel := pruningPointHeader.BlockLevel()
pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
var selectedTip *externalapi.DomainHash
if blockLevel <= pruningPointLevel {
@@ -310,7 +312,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
level0Headers := pruningPointProof.Headers[0]
pruningPointHeader := level0Headers[len(level0Headers)-1]
pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
pruningPointBlockLevel := pruningPointHeader.BlockLevel()
pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
if maxLevel >= len(pruningPointProof.Headers) {
return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
@@ -354,9 +356,9 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
}
blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -581,9 +583,9 @@ func (ppm *pruningProofManager) dagProcesses(
[]model.GHOSTDAGManager,
) {
reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
reachabilityManagers := make([]model.ReachabilityManager, ppm.maxBlockLevel+1)
dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1)
ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1)
for i := 0; i <= maxLevel; i++ {
reachabilityManagers[i] = reachabilitymanager.New(
@@ -627,9 +629,9 @@ func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *extern
stagingArea := model.NewStagingArea()
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
}
ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)

View File

@@ -179,9 +179,9 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
return bh.clone()
}
func (bh *blockHeader) BlockLevel() int {
func (bh *blockHeader) BlockLevel(maxBlockLevel int) int {
if !bh.isBlockLevelCached {
bh.blockLevel = pow.BlockLevel(bh)
bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel)
bh.isBlockLevelCached = true
}

View File

@@ -35,9 +35,4 @@ const (
// LockTimeThreshold is the number below which a lock time is
// interpreted to be a DAA score.
LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
// MaxBlockLevel is the maximum possible block level.
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
// This means that any block that has a level lower or equal to genesis will be level 0.
MaxBlockLevel = 225
)

View File

@@ -0,0 +1,79 @@
package lrucachehashandwindowsizetoblockghostdagdatahashpairs
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// lruKey is the composite cache key: a block hash together with the window
// size its cached slice was computed for.
type lruKey struct {
	blockHash  externalapi.DomainHash
	windowSize int
}
// newKey builds the cache key for the given block hash and window size,
// copying the hash by value so the key does not alias the caller's pointer.
func newKey(blockHash *externalapi.DomainHash, windowSize int) lruKey {
	return lruKey{
		windowSize: windowSize,
		blockHash:  *blockHash,
	}
}
// LRUCache is a bounded cache from (block hash, window size) to a slice of
// *externalapi.BlockGHOSTDAGDataHashPair.
//
// NOTE(review): despite the name, eviction here is NOT least-recently-used —
// when capacity is exceeded an arbitrary entry is removed (see evictRandom).
type LRUCache struct {
	cache    map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair
	capacity int
}
// New creates a new LRUCache that holds up to capacity entries. When
// preallocate is true, the underlying map is sized up front to avoid
// rehashing as it fills.
func New(capacity int, preallocate bool) *LRUCache {
	c := &LRUCache{capacity: capacity}
	if preallocate {
		// +1 because Add inserts before checking whether eviction is needed.
		c.cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair, capacity+1)
	} else {
		c.cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair)
	}
	return c
}
// Add adds an entry to the cache, evicting an arbitrary existing entry if the
// insertion pushes the cache over capacity.
func (c *LRUCache) Add(blockHash *externalapi.DomainHash, windowSize int, value []*externalapi.BlockGHOSTDAGDataHashPair) {
	c.cache[newKey(blockHash, windowSize)] = value
	// Insert-then-evict keeps the map at no more than capacity entries.
	if len(c.cache) > c.capacity {
		c.evictRandom()
	}
}
// Get returns the entry for the given key, or (nil, false) when absent.
func (c *LRUCache) Get(blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, bool) {
	value, found := c.cache[newKey(blockHash, windowSize)]
	return value, found
}
// Has reports whether the cache contains an entry for the given key.
func (c *LRUCache) Has(blockHash *externalapi.DomainHash, windowSize int) bool {
	_, found := c.cache[newKey(blockHash, windowSize)]
	return found
}
// Remove deletes the entry for the given key. It is a no-op when the entry
// does not exist.
func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, windowSize int) {
	delete(c.cache, newKey(blockHash, windowSize))
}
// evictRandom removes one arbitrary entry, relying on Go's randomized map
// iteration order to pick a pseudo-random victim. No-op on an empty cache.
func (c *LRUCache) evictRandom() {
	for key := range c.cache {
		c.Remove(&key.blockHash, key.windowSize)
		return
	}
}

View File

@@ -3,7 +3,6 @@ package pow
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
"github.com/kaspanet/kaspad/util/difficulty"
@@ -96,15 +95,15 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
}
// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int {
// Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
// block level.
if len(header.DirectParents()) == 0 {
return constants.MaxBlockLevel
return maxBlockLevel
}
proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
level := maxBlockLevel - proofOfWorkValue.BitLen()
// If the block has a level lower than genesis make it zero.
if level < 0 {
level = 0

View File

@@ -185,6 +185,9 @@ type Params struct {
DisallowDirectBlocksOnTopOfGenesis bool
IgnoreHeaderMass bool
// MaxBlockLevel is the maximum possible block level.
MaxBlockLevel int
}
// NormalizeRPCServerAddress returns addr with the current network default
@@ -279,16 +282,20 @@ var MainnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
DisallowDirectBlocksOnTopOfGenesis: true,
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
// This means that any block that has a level lower or equal to genesis will be level 0.
MaxBlockLevel: 225,
}
// TestnetParams defines the network parameters for the test Kaspa network.
var TestnetParams = Params{
K: defaultGHOSTDAGK,
Name: "kaspa-testnet-8",
Name: "kaspa-testnet-9",
Net: appmessage.Testnet,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{"testnet-8-dnsseed.daglabs-dev.com"},
DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},
// DAG parameters
GenesisBlock: &testnetGenesisBlock,
@@ -339,6 +346,8 @@ var TestnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
IgnoreHeaderMass: true,
MaxBlockLevel: 250,
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -402,6 +411,8 @@ var SimnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
MaxBlockLevel: 250,
}
// DevnetParams defines the network parameters for the development Kaspa network.
@@ -462,6 +473,8 @@ var DevnetParams = Params{
PruningProofM: defaultPruningProofM,
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
IgnoreHeaderMass: true,
MaxBlockLevel: 250,
}
// ErrDuplicateNet describes an error where the parameters for a Kaspa