Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-06-06 06:06:49 +00:00)
Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Make MaxBlockLevel a DAG parameter instead of a constant.
* Change the testnet network name to 9.
* Fix TestBlockWindow.
* Set MaxBlockLevels for non-mainnet networks to 250.
* Revert "Fix TestBlockWindow."
  This reverts commit 30a7892f53e0bb8d0d24435a68f0561a8efab575.
* Fix TestPruning.
Parent: 3f7e482291
Commit: 28d0f1ea2e
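
To orient the reader before the diff, here is a minimal, hypothetical Go sketch (simplified names, not the real kaspad APIs) of the pattern the commit introduces: the maximum block level is carried on the per-network parameters and handed to consensus components through their constructors, instead of being read from the package-level constant constants.MaxBlockLevel.

package main

import "fmt"

// netParams stands in for dagconfig.Params; only the fields relevant to this
// commit are shown. The values below are the ones set by the commit itself.
type netParams struct {
	Name          string
	MaxBlockLevel int
}

// parentsManager stands in for the consensus components (parents manager,
// block validator, pruning proof manager, ...) that used to import the
// constant and now receive the value at construction time.
type parentsManager struct {
	maxBlockLevel int
}

func newParentsManager(maxBlockLevel int) *parentsManager {
	return &parentsManager{maxBlockLevel: maxBlockLevel}
}

func main() {
	networks := []netParams{
		{Name: "kaspa-mainnet", MaxBlockLevel: 225},
		{Name: "kaspa-testnet-9", MaxBlockLevel: 250},
	}
	for _, params := range networks {
		pm := newParentsManager(params.MaxBlockLevel)
		fmt.Printf("%s: components built with maxBlockLevel=%d\n", params.Name, pm.maxBlockLevel)
	}
}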
@@ -6,7 +6,6 @@ import (
     "github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
     parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
     "github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "io/ioutil"
     "os"
     "sync"
@@ -158,7 +157,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
     dagTraversalManager := dagTraversalManagers[0]

     // Processes
-    parentsManager := parentssanager.New(config.GenesisHash)
+    parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel)
     blockParentBuilder := blockparentbuilder.New(
         dbManager,
         blockHeaderStore,
@@ -168,6 +167,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         pruningStore,

         config.GenesisHash,
+        config.MaxBlockLevel,
     )
     pastMedianTimeManager := f.pastMedianTimeConsructor(
         config.TimestampDeviationTolerance,
@@ -304,6 +304,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         config.TimestampDeviationTolerance,
         config.TargetTimePerBlock,
         config.IgnoreHeaderMass,
+        config.MaxBlockLevel,

         dbManager,
         difficultyManager,
@@ -370,6 +371,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
     blockProcessor := blockprocessor.New(
         genesisHash,
         config.TargetTimePerBlock,
+        config.MaxBlockLevel,
         dbManager,
         consensusStateManager,
         pruningManager,
@@ -417,6 +419,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
         genesisHash,
         config.K,
         config.PruningProofM,
+        config.MaxBlockLevel,
     )

     c := &consensus{
@@ -568,16 +571,16 @@ func dagStores(config *Config,
     pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int,
     preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) {

-    blockRelationStores := make([]model.BlockRelationStore, constants.MaxBlockLevel+1)
-    reachabilityDataStores := make([]model.ReachabilityDataStore, constants.MaxBlockLevel+1)
-    ghostdagDataStores := make([]model.GHOSTDAGDataStore, constants.MaxBlockLevel+1)
+    blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1)
+    reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1)
+    ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1)

     ghostdagDataCacheSize := pruningWindowSizeForCaches * 2
     if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
         ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
     }

-    for i := 0; i <= constants.MaxBlockLevel; i++ {
+    for i := 0; i <= config.MaxBlockLevel; i++ {
         prefixBucket := prefixBucket.Bucket([]byte{byte(i)})
         if i == 0 {
             blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
@@ -606,12 +609,12 @@ func (f *factory) dagProcesses(config *Config,
     []model.DAGTraversalManager,
 ) {

-    reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
-    dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
-    ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
-    dagTraversalManagers := make([]model.DAGTraversalManager, constants.MaxBlockLevel+1)
+    reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1)
+    dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1)
+    ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1)
+    dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1)

-    for i := 0; i <= constants.MaxBlockLevel; i++ {
+    for i := 0; i <= config.MaxBlockLevel; i++ {
         reachabilityManagers[i] = reachabilitymanager.New(
             dbManager,
             ghostdagDataStores[i],
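
The dagStores and dagProcesses hunks above all follow the same sizing rule; a small illustrative sketch (placeholder types, not the real model interfaces): each level-indexed slice gets config.MaxBlockLevel+1 entries, one per block level from 0 through MaxBlockLevel inclusive.

package main

import "fmt"

// levelStore is a placeholder for the per-level stores and managers the
// factory builds (block relations, reachability data, GHOSTDAG data, ...).
type levelStore struct{ level int }

// buildLevelStores mirrors the allocation pattern in the hunks above:
// one instance per block level, indexed 0..maxBlockLevel.
func buildLevelStores(maxBlockLevel int) []levelStore {
	stores := make([]levelStore, maxBlockLevel+1)
	for i := 0; i <= maxBlockLevel; i++ {
		stores[i] = levelStore{level: i}
	}
	return stores
}

func main() {
	// 250 is the non-mainnet value introduced by this commit; mainnet keeps 225.
	fmt.Println("level-indexed stores:", len(buildLevelStores(250))) // 251
}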
@@ -69,7 +69,7 @@ type BaseBlockHeader interface {
     BlueScore() uint64
     BlueWork() *big.Int
     PruningPoint() *DomainHash
-    BlockLevel() int
+    BlockLevel(maxBlockLevel int) int
     Equal(other BaseBlockHeader) bool
 }

@@ -17,6 +17,7 @@ type blockParentBuilder struct {
     pruningStore model.PruningStore

     genesisHash *externalapi.DomainHash
+    maxBlockLevel int
 }

 // New creates a new instance of a BlockParentBuilder
@@ -30,6 +31,7 @@ func New(
     pruningStore model.PruningStore,

     genesisHash *externalapi.DomainHash,
+    maxBlockLevel int,
 ) model.BlockParentBuilder {
     return &blockParentBuilder{
         databaseContext: databaseContext,
@@ -40,6 +42,7 @@ func New(
         reachabilityDataStore: reachabilityDataStore,
         pruningStore: pruningStore,
         genesisHash: genesisHash,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -102,7 +105,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
     // all the block levels they occupy
     for _, directParentHeader := range directParentHeaders {
         directParentHash := consensushashing.HeaderHash(directParentHeader)
-        blockLevel := directParentHeader.BlockLevel()
+        blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel)
         for i := 0; i <= blockLevel; i++ {
             if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
                 candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -14,6 +14,7 @@ import (
 type blockProcessor struct {
     genesisHash *externalapi.DomainHash
     targetTimePerBlock time.Duration
+    maxBlockLevel int
     databaseContext model.DBManager
     blockLogger *blocklogger.BlockLogger

@@ -52,6 +53,7 @@ type blockProcessor struct {
 func New(
     genesisHash *externalapi.DomainHash,
     targetTimePerBlock time.Duration,
+    maxBlockLevel int,
     databaseContext model.DBManager,

     consensusStateManager model.ConsensusStateManager,
@@ -86,6 +88,7 @@ func New(
     return &blockProcessor{
         genesisHash: genesisHash,
         targetTimePerBlock: targetTimePerBlock,
+        maxBlockLevel: maxBlockLevel,
         databaseContext: databaseContext,
         blockLogger: blocklogger.NewBlockLogger(),
         pruningManager: pruningManager,
@@ -259,7 +259,7 @@ func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.Stagi
         return err
     }

-    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel()
+    headersSelectedTipHeaderBlockLevel := headersSelectedTipHeader.BlockLevel(bp.maxBlockLevel)
     for blockLevel := 0; blockLevel <= headersSelectedTipHeaderBlockLevel; blockLevel++ {
         err := bp.reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, headersSelectedTip)
         if err != nil {
@@ -62,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
         return err
     }
     if !hasReachabilityData {
-        blockLevel := header.BlockLevel()
+        blockLevel := header.BlockLevel(v.maxBlockLevel)
         for i := 0; i <= blockLevel; i++ {
             err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
             if err != nil {
@@ -23,6 +23,7 @@ type blockValidator struct {
     timestampDeviationTolerance int
     targetTimePerBlock time.Duration
     ignoreHeaderMass bool
+    maxBlockLevel int

     databaseContext model.DBReader
     difficultyManager model.DifficultyManager
@@ -60,6 +61,7 @@ func New(powMax *big.Int,
     timestampDeviationTolerance int,
     targetTimePerBlock time.Duration,
     ignoreHeaderMass bool,
+    maxBlockLevel int,

     databaseContext model.DBReader,

@@ -97,6 +99,7 @@ func New(powMax *big.Int,
         mergeSetSizeLimit: mergeSetSizeLimit,
         maxBlockParents: maxBlockParents,
         ignoreHeaderMass: ignoreHeaderMass,
+        maxBlockLevel: maxBlockLevel,

         timestampDeviationTolerance: timestampDeviationTolerance,
         targetTimePerBlock: targetTimePerBlock,
@@ -69,7 +69,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
     header externalapi.BlockHeader,
     isBlockWithTrustedData bool) error {

-    for level := 0; level <= header.BlockLevel(); level++ {
+    for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ {
         var parents []*externalapi.DomainHash
         for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
             _, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
@@ -118,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
         return err
     }

-    blockLevel := header.BlockLevel()
+    blockLevel := header.BlockLevel(v.maxBlockLevel)
     for i := 1; i <= blockLevel; i++ {
         err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
         if err != nil {
@@ -3,17 +3,18 @@ package parentssanager
 import (
     "github.com/kaspanet/kaspad/domain/consensus/model"
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
 )

 type parentsManager struct {
     genesisHash *externalapi.DomainHash
+    maxBlockLevel int
 }

 // New instantiates a new ParentsManager
-func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
+func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager {
     return &parentsManager{
         genesisHash: genesisHash,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -31,7 +32,7 @@ func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, le
 }

 func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
-    numParents := constants.MaxBlockLevel + 1
+    numParents := pm.maxBlockLevel + 1
     parents := make([]externalapi.BlockLevelParents, numParents)
     for i := 0; i < numParents; i++ {
         parents[i] = pm.ParentsAtLevel(blockHeader, i)
@@ -38,8 +38,8 @@ func TestPruning(t *testing.T) {
         "dag-for-test-pruning.json": {
             dagconfig.MainnetParams.Name: "503",
             dagconfig.TestnetParams.Name: "502",
-            dagconfig.DevnetParams.Name: "503",
-            dagconfig.SimnetParams.Name: "502",
+            dagconfig.DevnetParams.Name: "502",
+            dagconfig.SimnetParams.Name: "503",
         },
     }

@@ -13,7 +13,6 @@ import (
     "github.com/kaspanet/kaspad/domain/consensus/processes/reachabilitymanager"
     "github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
    "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
     "github.com/kaspanet/kaspad/infrastructure/db/database"
     "github.com/kaspanet/kaspad/infrastructure/logger"
@@ -41,6 +40,7 @@ type pruningProofManager struct {
     genesisHash *externalapi.DomainHash
     k externalapi.KType
     pruningProofM uint64
+    maxBlockLevel int

     cachedPruningPoint *externalapi.DomainHash
     cachedProof *externalapi.PruningPointProof
@@ -66,6 +66,7 @@ func New(
     genesisHash *externalapi.DomainHash,
     k externalapi.KType,
     pruningProofM uint64,
+    maxBlockLevel int,
 ) model.PruningProofManager {

     return &pruningProofManager{
@@ -86,6 +87,7 @@ func New(
         genesisHash: genesisHash,
         k: k,
         pruningProofM: pruningProofM,
+        maxBlockLevel: maxBlockLevel,
     }
 }

@@ -134,7 +136,7 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
     maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
     headersByLevel := make(map[int][]externalapi.BlockHeader)
     selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
-    pruningPointLevel := pruningPointHeader.BlockLevel()
+    pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
     for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
         var selectedTip *externalapi.DomainHash
         if blockLevel <= pruningPointLevel {
@@ -310,7 +312,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
     level0Headers := pruningPointProof.Headers[0]
     pruningPointHeader := level0Headers[len(level0Headers)-1]
     pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
-    pruningPointBlockLevel := pruningPointHeader.BlockLevel()
+    pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel)
     maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
     if maxLevel >= len(pruningPointProof.Headers) {
         return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
@@ -354,9 +356,9 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
         var selectedTip *externalapi.DomainHash
         for i, header := range headers {
             blockHash := consensushashing.HeaderHash(header)
-            if header.BlockLevel() < blockLevel {
+            if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
                 return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-                    "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
+                    "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
             }

             blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -581,9 +583,9 @@ func (ppm *pruningProofManager) dagProcesses(
     []model.GHOSTDAGManager,
 ) {

-    reachabilityManagers := make([]model.ReachabilityManager, constants.MaxBlockLevel+1)
-    dagTopologyManagers := make([]model.DAGTopologyManager, constants.MaxBlockLevel+1)
-    ghostdagManagers := make([]model.GHOSTDAGManager, constants.MaxBlockLevel+1)
+    reachabilityManagers := make([]model.ReachabilityManager, ppm.maxBlockLevel+1)
+    dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1)
+    ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1)

     for i := 0; i <= maxLevel; i++ {
         reachabilityManagers[i] = reachabilitymanager.New(
@@ -627,9 +629,9 @@ func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *extern
         stagingArea := model.NewStagingArea()

         blockHash := consensushashing.HeaderHash(header)
-        if header.BlockLevel() < blockLevel {
+        if header.BlockLevel(ppm.maxBlockLevel) < blockLevel {
             return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
-                "expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
+                "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel)
         }

         ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
@@ -179,9 +179,9 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
     return bh.clone()
 }

-func (bh *blockHeader) BlockLevel() int {
+func (bh *blockHeader) BlockLevel(maxBlockLevel int) int {
     if !bh.isBlockLevelCached {
-        bh.blockLevel = pow.BlockLevel(bh)
+        bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel)
         bh.isBlockLevelCached = true
     }

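
One detail worth noting in the hunk above: blockHeader memoizes the computed level, so the maxBlockLevel argument only affects the first call for a given header. A toy sketch of that caching behaviour (hypothetical type, not the real blockHeader):

package main

import "fmt"

// cachedLevelHeader is a toy stand-in for blockHeader: like the hunk above,
// it computes the block level once and then returns the cached value, so
// later calls ignore their maxBlockLevel argument.
type cachedLevelHeader struct {
	powBitLen          int // stand-in for the bit length of the header's proof-of-work value
	blockLevel         int
	isBlockLevelCached bool
}

func (h *cachedLevelHeader) BlockLevel(maxBlockLevel int) int {
	if !h.isBlockLevelCached {
		level := maxBlockLevel - h.powBitLen
		if level < 0 {
			level = 0
		}
		h.blockLevel = level
		h.isBlockLevelCached = true
	}
	return h.blockLevel
}

func main() {
	h := &cachedLevelHeader{powBitLen: 240}
	fmt.Println(h.BlockLevel(250)) // 10: computed and cached on the first call
	fmt.Println(h.BlockLevel(225)) // still 10: the cached value is returned
}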
@@ -35,9 +35,4 @@ const (
     // LockTimeThreshold is the number below which a lock time is
     // interpreted to be a DAA score.
     LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
-
-    // MaxBlockLevel is the maximum possible block level.
-    // This is technically 255, but we clamped it at 256 - block level of mainnet genesis
-    // This means that any block that has a level lower or equal to genesis will be level 0.
-    MaxBlockLevel = 225
 )
@@ -3,7 +3,6 @@ package pow
 import (
     "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
     "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
-    "github.com/kaspanet/kaspad/domain/consensus/utils/constants"
     "github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
     "github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
     "github.com/kaspanet/kaspad/util/difficulty"
@@ -96,15 +95,15 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
 }

 // BlockLevel returns the block level of the given header.
-func BlockLevel(header externalapi.BlockHeader) int {
+func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int {
     // Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
     // block level.
     if len(header.DirectParents()) == 0 {
-        return constants.MaxBlockLevel
+        return maxBlockLevel
     }

     proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
-    level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
+    level := maxBlockLevel - proofOfWorkValue.BitLen()
     // If the block has a level lower than genesis make it zero.
     if level < 0 {
         level = 0
|
||||
DisallowDirectBlocksOnTopOfGenesis bool
|
||||
|
||||
IgnoreHeaderMass bool
|
||||
|
||||
// MaxBlockLevel is the maximum possible block level.
|
||||
MaxBlockLevel int
|
||||
}
|
||||
|
||||
// NormalizeRPCServerAddress returns addr with the current network default
|
||||
@ -279,16 +282,20 @@ var MainnetParams = Params{
|
||||
PruningProofM: defaultPruningProofM,
|
||||
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
|
||||
DisallowDirectBlocksOnTopOfGenesis: true,
|
||||
|
||||
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
|
||||
// This means that any block that has a level lower or equal to genesis will be level 0.
|
||||
MaxBlockLevel: 225,
|
||||
}
|
||||
|
||||
// TestnetParams defines the network parameters for the test Kaspa network.
|
||||
var TestnetParams = Params{
|
||||
K: defaultGHOSTDAGK,
|
||||
Name: "kaspa-testnet-8",
|
||||
Name: "kaspa-testnet-9",
|
||||
Net: appmessage.Testnet,
|
||||
RPCPort: "16210",
|
||||
DefaultPort: "16211",
|
||||
DNSSeeds: []string{"testnet-8-dnsseed.daglabs-dev.com"},
|
||||
DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},
|
||||
|
||||
// DAG parameters
|
||||
GenesisBlock: &testnetGenesisBlock,
|
||||
@ -339,6 +346,8 @@ var TestnetParams = Params{
|
||||
PruningProofM: defaultPruningProofM,
|
||||
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
|
||||
IgnoreHeaderMass: true,
|
||||
|
||||
MaxBlockLevel: 250,
|
||||
}
|
||||
|
||||
// SimnetParams defines the network parameters for the simulation test Kaspa
|
||||
@ -402,6 +411,8 @@ var SimnetParams = Params{
|
||||
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
|
||||
PruningProofM: defaultPruningProofM,
|
||||
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
|
||||
|
||||
MaxBlockLevel: 250,
|
||||
}
|
||||
|
||||
// DevnetParams defines the network parameters for the development Kaspa network.
|
||||
@ -462,6 +473,8 @@ var DevnetParams = Params{
|
||||
PruningProofM: defaultPruningProofM,
|
||||
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore,
|
||||
IgnoreHeaderMass: true,
|
||||
|
||||
MaxBlockLevel: 250,
|
||||
}
|
||||
|
||||
// ErrDuplicateNet describes an error where the parameters for a Kaspa
|
||||
|