Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-06-06 06:06:49 +00:00)
Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Make MaxBlockLevel a DAG params instead of a constant.
* Change the testnet network name to 9.
* Fix TestBlockWindow.
* Set MaxBlockLevels for non-mainnet networks to 250.
* Revert "Fix TestBlockWindow." This reverts commit 30a7892f53e0bb8d0d24435a68f0561a8efab575.
* Fix TestPruning.
This commit is contained in:
parent 3f7e482291
commit 28d0f1ea2e
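Before the diff, here is a minimal, self-contained Go sketch of the pattern the commit applies. It is not code from the kaspad tree: the params struct, the blockLevel function, and the sample proof-of-work value are illustrative stand-ins. It shows the maximum block level moving from the removed constants.MaxBlockLevel into a per-network parameter (225 on mainnet, 250 on the other networks in this commit), with a header's block level derived from its proof-of-work value and that per-network maximum, mirroring the shape of pow.BlockLevel in the diff below.

package main

import (
    "fmt"
    "math/big"
)

// params mirrors the relevant slice of dagconfig.Params after this commit:
// each network carries its own MaxBlockLevel instead of sharing one constant.
type params struct {
    Name          string
    MaxBlockLevel int
}

// blockLevel follows the shape of pow.BlockLevel after the change: the level
// is maxBlockLevel minus the bit length of the proof-of-work value, clamped
// at zero. proofOfWorkValue stands in for the value kaspad computes via
// NewState(header.ToMutable()).CalculateProofOfWorkValue().
func blockLevel(proofOfWorkValue *big.Int, maxBlockLevel int) int {
    level := maxBlockLevel - proofOfWorkValue.BitLen()
    if level < 0 {
        level = 0
    }
    return level
}

func main() {
    networks := []params{
        {Name: "kaspa-mainnet", MaxBlockLevel: 225},
        {Name: "kaspa-testnet-9", MaxBlockLevel: 250},
    }
    // A hypothetical 201-bit proof-of-work value, used only for illustration.
    proofOfWorkValue := new(big.Int).Lsh(big.NewInt(1), 200)
    for _, network := range networks {
        fmt.Printf("%s: block level %d\n", network.Name, blockLevel(proofOfWorkValue, network.MaxBlockLevel))
    }
}

Because the same proof-of-work value now maps to a different level on each network, call sites such as pow.BlockLevel and BaseBlockHeader.BlockLevel take maxBlockLevel explicitly, which is what most hunks below thread through.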
@@ -6,7 +6,6 @@ import (
     "github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
     parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
     "github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "io/ioutil"
     "os"
     "sync"
@@ -158,7 +157,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
     dagTraversalManager := dagTraversalManagers[0]

     // Processes
-    parentsManager := parentssanager.New(config.GenesisHash)
+    parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel)
     blockParentBuilder := blockparentbuilder.New(
         dbManager,
         blockHeaderStore,
@@ -168,6 +167,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         pruningStore,

         config.GenesisHash,
+        config.MaxBlockLevel,
     )
     pastMedianTimeManager := f.pastMedianTimeConsructor(
         config.TimestampDeviationTolerance,
@@ -304,6 +304,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         config.TimestampDeviationTolerance,
         config.TargetTimePerBlock,
         config.IgnoreHeaderMass,
+        config.MaxBlockLevel,

         dbManager,
         difficultyManager,
@@ -370,6 +371,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
     blockProcessor := blockprocessor.New(
         genesisHash,
         config.TargetTimePerBlock,
+        config.MaxBlockLevel,
         dbManager,
         consensusStateManager,
         pruningManager,
@@ -417,6 +419,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         genesisHash,
         config.K,
         config.PruningProofM,
+        config.MaxBlockLevel,
     )

     c := &consensus{
@@ -568,16 +571,16 @@ func dagStores(config *Config,
     pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int,
     preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) {

-    blockRelationStores := make([]model.BlockRelationStore, constants.MaxBlockLevel+1)
-    reachabilityDataStores := make([]model.ReachabilityDataStore, constants.MaxBlockLevel+1)
-    ghostdagDataStores := make([]model.GHOSTDAGDataStore, constants.MaxBlockLevel+1)
+    blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1)
+    reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1)
+    ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1)

     ghostdagDataCacheSize := pruningWindowSizeForCaches * 2
     if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
         ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
     }

-    for i := 0; i <= constants.MaxBlockLevel; i++ {
+    for i := 0; i <= config.MaxBlockLevel; i++ {
         prefixBucket := prefixBucket.Bucket([]byte{byte(i)})
         if i == 0 {
             blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
@@ -606,12 +609,12 @@ func (f *factory) dagProcesses(config *Config,
     []model.DAGTraversalManager,
 ) {

-    reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
-    dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
-    ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
-    dagTraversalManagers := make([]model.DAGTraversalManager, constants.MaxBlockLevel+1)
+    reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1)
+    dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1)
+    ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1)
+    dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1)

-    for i := 0; i <= constants.MaxBlockLevel; i++ {
+    for i := 0; i <= config.MaxBlockLevel; i++ {
         reachabilityManagers[i] = reachabilitymanager.New(
             dbManager,
             ghostdagDataStores[i],
@@ -69,7 +69,7 @@ type BaseBlockHeader interface {
     BlueScore() uint64
     BlueWork() *big.Int
     PruningPoint() *DomainHash
-    BlockLevel() int
+    BlockLevel(maxBlockLevel int) int
     Equal(other BaseBlockHeader) bool
 }

@@ -16,7 +16,8 @@ type blockParentBuilder struct {
     reachabilityDataStore model.ReachabilityDataStore
     pruningStore model.PruningStore

     genesisHash *externalapi.DomainHash
+    maxBlockLevel int
 }

 // New creates a new instance of a BlockParentBuilder
@@ -30,6 +31,7 @@ func New(
     pruningStore model.PruningStore,

     genesisHash *externalapi.DomainHash,
+    maxBlockLevel int,
 ) model.BlockParentBuilder {
     return &blockParentBuilder{
         databaseContext: databaseContext,
@@ -40,6 +42,7 @@ func New(
         reachabilityDataStore: reachabilityDataStore,
         pruningStore: pruningStore,
         genesisHash: genesisHash,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -102,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
     // all the block levels they occupy
     for _, directParentHeader := range directParentHeaders {
         directParentHash := consensushashing.HeaderHash(directParentHeader)
-        blockLevel := directParentHeader.BlockLevel()
+        blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel)
         for i := 0; i <= blockLevel; i++ {
             if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
                 candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -14,6 +14,7 @@ import (
 type blockProcessor struct {
     genesisHash *externalapi.DomainHash
     targetTimePerBlock time.Duration
+    maxBlockLevel int
     databaseContext model.DBManager
     blockLogger *blocklogger.BlockLogger

@@ -52,6 +53,7 @@ type blockProcessor struct {
 func New(
     genesisHash *externalapi.DomainHash,
     targetTimePerBlock time.Duration,
+    maxBlockLevel int,
     databaseContext model.DBManager,

     consensusStateManager model.ConsensusStateManager,
@@ -86,6 +88,7 @@ func New(
     return &blockProcessor{
         genesisHash: genesisHash,
         targetTimePerBlock: targetTimePerBlock,
+        maxBlockLevel: maxBlockLevel,
         databaseContext: databaseContext,
         blockLogger: blocklogger.NewBlockLogger(),
         pruningManager: pruningManager,
@@ -259,7 +259,7 @@ func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.Stagi
         return err
     }

-    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel()
+    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel(bp.maxBlockLevel)
     for blockLevel := 0; blockLevel <= headersSelectedTipHeaderBlockLevel; blockLevel++ {
         err := bp.reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, headersSelectedTip)
         if err != nil {
@@ -62,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
         return err
     }
     if !hasReachabilityData {
-        blockLevel := header.BlockLevel()
+        blockLevel := header.BlockLevel(v.maxBlockLevel)
         for i := 0; i <= blockLevel; i++ {
             err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
             if err != nil {
@@ -23,6 +23,7 @@ type blockValidator struct {
     timestampDeviationTolerance int
     targetTimePerBlock time.Duration
     ignoreHeaderMass bool
+    maxBlockLevel int

     databaseContext model.DBReader
     difficultyManager model.DifficultyManager
@@ -60,6 +61,7 @@ func New(powMax *big.Int,
     timestampDeviationTolerance int,
     targetTimePerBlock time.Duration,
     ignoreHeaderMass bool,
+    maxBlockLevel int,

     databaseContext model.DBReader,

@@ -97,6 +99,7 @@ func New(powMax *big.Int,
         mergeSetSizeLimit: mergeSetSizeLimit,
         maxBlockParents: maxBlockParents,
         ignoreHeaderMass: ignoreHeaderMass,
+        maxBlockLevel: maxBlockLevel,

         timestampDeviationTolerance: timestampDeviationTolerance,
         targetTimePerBlock: targetTimePerBlock,
@@ -69,7 +69,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
     header externalapi.BlockHeader,
     isBlockWithTrustedData bool) error {

-    for level := 0; level <= header.BlockLevel(); level++ {
+    for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ {
         var parents []*externalapi.DomainHash
         for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
             _, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
blockLevel := header.BlockLevel()
|
blockLevel := header.BlockLevel(v.maxBlockLevel)
|
||||||
for i := 1; i <= blockLevel; i++ {
|
for i := 1; i <= blockLevel; i++ {
|
||||||
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
|
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -3,17 +3,18 @@ package parentssanager
 import (
     "github.com/kaspanet/kaspad/domain/consensus/model"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
 )

 type parentsManager struct {
     genesisHash *externalapi.DomainHash
+    maxBlockLevel int
 }

 // New instantiates a new ParentsManager
-func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
+func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager {
     return &parentsManager{
         genesisHash: genesisHash,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -31,7 +32,7 @@ func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, le
 }

 func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
-    numParents := constants.MaxBlockLevel + 1
+    numParents := pm.maxBlockLevel + 1
     parents := make([]externalapi.BlockLevelParents, numParents)
     for i := 0; i < numParents; i++ {
         parents[i] = pm.ParentsAtLevel(blockHeader, i)
@@ -38,8 +38,8 @@ func TestPruning(t *testing.T) {
     "dag-for-test-pruning.json": {
         dagconfig.MainnetParams.Name: "503",
         dagconfig.TestnetParams.Name: "502",
-        dagconfig.DevnetParams.Name: "503",
-        dagconfig.SimnetParams.Name: "502",
+        dagconfig.DevnetParams.Name: "502",
+        dagconfig.SimnetParams.Name: "503",
     },
 }

@@ -13,7 +13,6 @@ import (
     "github.com/kaspanet/kaspad/domain/consensus/processes/reachabilitymanager"
     "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
     "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
     "github.com/kaspanet/kaspad/infrastructure/db/database"
     "github.com/kaspanet/kaspad/infrastructure/logger"
@@ -41,6 +40,7 @@ type pruningProofManager struct {
     genesisHash *externalapi.DomainHash
     k externalapi.KType
     pruningProofM uint64
+    maxBlockLevel int

     cachedPruningPoint *externalapi.DomainHash
     cachedProof *externalapi.PruningPointProof
@@ -66,6 +66,7 @@ func New(
     genesisHash *externalapi.DomainHash,
     k externalapi.KType,
     pruningProofM uint64,
+    maxBlockLevel int,
 ) model.PruningProofManager {

     return &pruningProofManager{
@@ -86,6 +87,7 @@ func New(
         genesisHash: genesisHash,
         k: k,
         pruningProofM: pruningProofM,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -134,7 +136,7 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
     maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
     headersByLevel := make(map[int][]externalapi.BlockHeader)
     selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
-    pruningPointLevel := pruningPointHeader.BlockLevel()
+    pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
     for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
         var selectedTip *externalapi.DomainHash
         if blockLevel <= pruningPointLevel {
@@ -310,7 +312,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
     level0Headers := pruningPointProof.Headers[0]
     pruningPointHeader := level0Headers[len(level0Headers)-1]
     pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
-    pruningPointBlockLevel := pruningPointHeader.BlockLevel()
+    pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
     maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
     if maxLevel >= len(pruningPointProof.Headers) {
         return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
@@ -354,9 +356,9 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
     var selectedTip *externalapi.DomainHash
     for i, header := range headers {
         blockHash := consensushashing.HeaderHash(header)
-        if header.BlockLevel() < blockLevel {
+        if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
             return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-                "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
+                "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
         }

         blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -581,9 +583,9 @@ func (ppm *pruningProofManager) dagProcesses(
     []model.GHOSTDAGManager,
 ) {

-    reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
-    dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
-    ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
+    reachabilityManagers := make([]model.ReachabilityManager, ppm.maxBlockLevel+1)
+    dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1)
+    ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1)

     for i := 0; i <= maxLevel; i++ {
         reachabilityManagers[i] = reachabilitymanager.New(
@@ -627,9 +629,9 @@ func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *extern
         stagingArea := model.NewStagingArea()

         blockHash := consensushashing.HeaderHash(header)
-        if header.BlockLevel() < blockLevel {
+        if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
             return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-                "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
+                "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
         }

         ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -179,9 +179,9 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
     return bh.clone()
 }

-func (bh *blockHeader) BlockLevel() int {
+func (bh *blockHeader) BlockLevel(maxBlockLevel int) int {
     if !bh.isBlockLevelCached {
-        bh.blockLevel = pow.BlockLevel(bh)
+        bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel)
         bh.isBlockLevelCached = true
     }

@@ -35,9 +35,4 @@ const (
     // LockTimeThreshold is the number below which a lock time is
     // interpreted to be a DAA score.
     LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
-
-    // MaxBlockLevel is the maximum possible block level.
-    // This is technically 255, but we clamped it at 256 - block level of mainnet genesis
-    // This means that any block that has a level lower or equal to genesis will be level 0.
-    MaxBlockLevel = 225
 )
@@ -3,7 +3,6 @@ package pow
 import (
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
     "github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
     "github.com/kaspanet/kaspad/util/difficulty"
@@ -96,15 +95,15 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
 }

 // BlockLevel returns the block level of the given header.
-func BlockLevel(header externalapi.BlockHeader) int {
+func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int {
     // Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
     // block level.
     if len(header.DirectParents()) == 0 {
-        return constants.MaxBlockLevel
+        return maxBlockLevel
     }

     proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
-    level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
+    level := maxBlockLevel - proofOfWorkValue.BitLen()
     // If the block has a level lower than genesis make it zero.
     if level < 0 {
         level = 0
@@ -185,6 +185,9 @@ type Params struct {
     DisallowDirectBlocksOnTopOfGenesis bool

     IgnoreHeaderMass bool
+
+    // MaxBlockLevel is the maximum possible block level.
+    MaxBlockLevel int
 }

 // NormalizeRPCServerAddress returns addr with the current network default
@@ -279,16 +282,20 @@ var MainnetParams = Params{
     PruningProofM: defaultPruningProofM,
     DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
     DisallowDirectBlocksOnTopOfGenesis: true,
+
+    // This is technically 255, but we clamped it at 256 - block level of mainnet genesis
+    // This means that any block that has a level lower or equal to genesis will be level 0.
+    MaxBlockLevel: 225,
 }

 // TestnetParams defines the network parameters for the test Kaspa network.
 var TestnetParams = Params{
     K: defaultGHOSTDAGK,
-    Name: "kaspa-testnet-8",
+    Name: "kaspa-testnet-9",
     Net: appmessage.Testnet,
     RPCPort: "16210",
     DefaultPort: "16211",
-    DNSSeeds: []string{"testnet-8-dnsseed.daglabs-dev.com"},
+    DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},

     // DAG parameters
     GenesisBlock: &testnetGenesisBlock,
@@ -339,6 +346,8 @@ var TestnetParams = Params{
     PruningProofM: defaultPruningProofM,
     DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
     IgnoreHeaderMass: true,
+
+    MaxBlockLevel: 250,
 }

 // SimnetParams defines the network parameters for the simulation test Kaspa
@@ -402,6 +411,8 @@ var SimnetParams = Params{
     CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
     PruningProofM: defaultPruningProofM,
     DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
+
+    MaxBlockLevel: 250,
 }

 // DevnetParams defines the network parameters for the development Kaspa network.
@@ -462,6 +473,8 @@ var DevnetParams = Params{
     PruningProofM: defaultPruningProofM,
     DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
     IgnoreHeaderMass: true,
+
+    MaxBlockLevel: 250,
 }

 // ErrDuplicateNet describes an error where the parameters for a Kaspa