Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-03-30 15:08:33 +00:00
Validate each level parents (#1827)
* Create BlockParentBuilder. * Implement BuildParents. * Explictly set level 0 blocks to be the same as direct parents. * Add checkIndirectParents to validateBlockHeaderInContext. * Fix test_block_builder.go and BlockLevelParents::Equal. * Don't check indirect parents for blocks with trusted data. * Handle pruned blocks when building block level parents. * Fix bad deletions from unprocessedXxxParents. * Fix merge errors. * Fix bad pruning point parent replaces. * Fix duplicates in newBlockLevelParents. * Skip checkIndirectParents * Get rid of staging constant IDs * Fix BuildParents * Fix tests * Add comments * Change order of directParentHashes * Get rid of maybeAddDirectParentParents * Add comments * Add blockToReferences type * Use ParentsAtLevel Co-authored-by: stasatdaglabs <stas@daglabs.com>
This commit is contained in:
parent 0053ee788d
commit afaac28da1
@@ -12,7 +12,7 @@ type acceptanceDataStagingShard struct {
 }
 
 func (ads *acceptanceDataStore) stagingShard(stagingArea *model.StagingArea) *acceptanceDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDAcceptanceData, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ads.shardID, func() model.StagingShard {
 		return &acceptanceDataStagingShard{
 			store: ads,
 			toAdd: make(map[externalapi.DomainHash]externalapi.AcceptanceData),
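Every store now carries its own shardID (here ads.shardID) in place of the removed compile-time StagingShardID constants; the ID is drawn once, at store construction, via staging.GenerateShardingID(), visible in the New functions throughout this diff. A minimal sketch of what such a generator can look like — the body below is an assumption for illustration, not the commit's verbatim code:

package staging

import (
	"sync/atomic"

	"github.com/kaspanet/kaspad/domain/consensus/model"
)

var lastShardID uint64

// GenerateShardingID returns a process-wide unique staging shard ID.
// The atomic increment keeps it safe when stores are constructed concurrently.
func GenerateShardingID() model.StagingShardID {
	return model.StagingShardID(atomic.AddUint64(&lastShardID, 1))
}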
@@ -1,12 +1,11 @@
 package acceptancedatastore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"google.golang.org/protobuf/proto"
 )
@@ -14,15 +13,17 @@ var bucketName = []byte("acceptance-data")
 
 // acceptanceDataStore represents a store of AcceptanceData
 type acceptanceDataStore struct {
-	cache  *lrucache.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new AcceptanceDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.AcceptanceDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.AcceptanceDataStore {
 	return &acceptanceDataStore{
-		cache:  lrucache.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucache.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -12,7 +12,7 @@ type blockHeaderStagingShard struct {
 }
 
 func (bhs *blockHeaderStore) stagingShard(stagingArea *model.StagingArea) *blockHeaderStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockHeader, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bhs.shardID, func() model.StagingShard {
 		return &blockHeaderStagingShard{
 			store: bhs,
 			toAdd: make(map[externalapi.DomainHash]externalapi.BlockHeader),
@@ -2,12 +2,11 @@ package blockheaderstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-headers")
@@ -15,6 +14,7 @@ var countKeyName = []byte("block-headers-count")
 
 // blockHeaderStore represents a store of blocks
 type blockHeaderStore struct {
+	shardID     model.StagingShardID
 	cache       *lrucache.LRUCache
 	countCached uint64
 	bucket      model.DBBucket
@@ -22,11 +22,12 @@ type blockHeaderStore struct {
 }
 
 // New instantiates a new BlockHeaderStore
-func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
+func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
 	blockHeaderStore := &blockHeaderStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:   database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
-		countKey: database.MakeBucket(prefix.Serialize()).Key(countKeyName),
+		bucket:   prefixBucket.Bucket(bucketName),
+		countKey: prefixBucket.Key(countKeyName),
 	}
 
 	err := blockHeaderStore.initializeCount(dbContext)
@@ -11,7 +11,7 @@ type blockRelationStagingShard struct {
 }
 
 func (brs *blockRelationStore) stagingShard(stagingArea *model.StagingArea) *blockRelationStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockRelation, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(brs.shardID, func() model.StagingShard {
 		return &blockRelationStagingShard{
 			store: brs,
 			toAdd: make(map[externalapi.DomainHash]*model.BlockRelations),
@@ -2,27 +2,28 @@ package blockrelationstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-relations")
 
 // blockRelationStore represents a store of BlockRelations
 type blockRelationStore struct {
-	cache  *lrucache.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new BlockRelationStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockRelationStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockRelationStore {
 	return &blockRelationStore{
-		cache:  lrucache.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucache.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -11,7 +11,7 @@ type blockStatusStagingShard struct {
 }
 
 func (bss *blockStatusStore) stagingShard(stagingArea *model.StagingArea) *blockStatusStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockStatus, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard {
 		return &blockStatusStagingShard{
 			store: bss,
 			toAdd: make(map[externalapi.DomainHash]externalapi.BlockStatus),
@@ -2,27 +2,28 @@ package blockstatusstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-statuses")
 
 // blockStatusStore represents a store of BlockStatuses
 type blockStatusStore struct {
-	cache  *lrucache.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new BlockStatusStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockStatusStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockStatusStore {
 	return &blockStatusStore{
-		cache:  lrucache.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucache.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -12,7 +12,7 @@ type blockStagingShard struct {
 }
 
 func (bs *blockStore) stagingShard(stagingArea *model.StagingArea) *blockStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlock, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard {
 		return &blockStagingShard{
 			store: bs,
 			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
@@ -2,12 +2,11 @@ package blockstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"github.com/pkg/errors"
 )
@@ -15,6 +14,7 @@ var bucketName = []byte("blocks")
 
 // blockStore represents a store of blocks
 type blockStore struct {
+	shardID     model.StagingShardID
 	cache       *lrucache.LRUCache
 	countCached uint64
 	bucket      model.DBBucket
@@ -22,11 +22,12 @@ type blockStore struct {
 }
 
 // New instantiates a new BlockStore
-func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockStore, error) {
+func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockStore, error) {
 	blockStore := &blockStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:   database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
-		countKey: database.MakeBucket(prefix.Serialize()).Key([]byte("blocks-count")),
+		bucket:   prefixBucket.Bucket(bucketName),
+		countKey: prefixBucket.Key([]byte("blocks-count")),
 	}
 
 	err := blockStore.initializeCount(dbContext)
@@ -12,7 +12,7 @@ type consensusStateStagingShard struct {
 }
 
 func (bs *consensusStateStore) stagingShard(stagingArea *model.StagingArea) *consensusStateStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDConsensusState, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard {
 		return &consensusStateStagingShard{
 			store:       bs,
 			tipsStaging: nil,
@@ -1,17 +1,17 @@
 package consensusstatestore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/utxolrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var importingPruningPointUTXOSetKeyName = []byte("importing-pruning-point-utxo-set")
 
 // consensusStateStore represents a store for the current consensus state
 type consensusStateStore struct {
+	shardID             model.StagingShardID
 	virtualUTXOSetCache *utxolrucache.LRUCache
 	tipsCache           []*externalapi.DomainHash
 	tipsKey             model.DBKey
@@ -20,12 +20,13 @@ type consensusStateStore struct {
 }
 
 // New instantiates a new ConsensusStateStore
-func New(prefix *prefix.Prefix, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
+func New(prefixBucket model.DBBucket, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
 	return &consensusStateStore{
+		shardID:             staging.GenerateShardingID(),
 		virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize, preallocate),
-		tipsKey:                         database.MakeBucket(prefix.Serialize()).Key(tipsKeyName),
-		importingPruningPointUTXOSetKey: database.MakeBucket(prefix.Serialize()).Key(importingPruningPointUTXOSetKeyName),
-		utxoSetBucket:                   database.MakeBucket(prefix.Serialize()).Bucket(utxoSetBucketName),
+		tipsKey:                         prefixBucket.Key(tipsKeyName),
+		importingPruningPointUTXOSetKey: prefixBucket.Key(importingPruningPointUTXOSetKeyName),
+		utxoSetBucket:                   prefixBucket.Bucket(utxoSetBucketName),
 	}
 }
@@ -15,7 +15,7 @@ type daaBlocksStagingShard struct {
 }
 
 func (daas *daaBlocksStore) stagingShard(stagingArea *model.StagingArea) *daaBlocksStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDDAABlocks, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(daas.shardID, func() model.StagingShard {
 		return &daaBlocksStagingShard{
 			store:         daas,
 			daaScoreToAdd: make(map[externalapi.DomainHash]uint64),
@@ -1,12 +1,11 @@
 package daablocksstore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var daaScoreBucketName = []byte("daa-score")
@@ -14,6 +13,7 @@ var daaAddedBlocksBucketName = []byte("daa-added-blocks")
 
 // daaBlocksStore represents a store of DAABlocksStore
 type daaBlocksStore struct {
+	shardID                model.StagingShardID
 	daaScoreLRUCache       *lrucache.LRUCache
 	daaAddedBlocksLRUCache *lrucache.LRUCache
 	daaScoreBucket         model.DBBucket
@@ -21,12 +21,13 @@ type daaBlocksStore struct {
 }
 
 // New instantiates a new DAABlocksStore
-func New(prefix *prefix.Prefix, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
+func New(prefixBucket model.DBBucket, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
 	return &daaBlocksStore{
+		shardID:                staging.GenerateShardingID(),
 		daaScoreLRUCache:       lrucache.New(daaScoreCacheSize, preallocate),
 		daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate),
-		daaScoreBucket:         database.MakeBucket(prefix.Serialize()).Bucket(daaScoreBucketName),
-		daaAddedBlocksBucket:   database.MakeBucket(prefix.Serialize()).Bucket(daaAddedBlocksBucketName),
+		daaScoreBucket:         prefixBucket.Bucket(daaScoreBucketName),
+		daaAddedBlocksBucket:   prefixBucket.Bucket(daaAddedBlocksBucketName),
 	}
 }
@@ -25,7 +25,7 @@ type daaWindowStagingShard struct {
 }
 
 func (daaws *daaWindowStore) stagingShard(stagingArea *model.StagingArea) *daaWindowStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDDAAWindow, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(daaws.shardID, func() model.StagingShard {
 		return &daaWindowStagingShard{
 			store: daaws,
 			toAdd: make(map[dbKey]*externalapi.BlockGHOSTDAGDataHashPair),
@@ -3,26 +3,27 @@ package daawindowstore
 import (
 	"encoding/binary"
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("daa-window")
 
 type daaWindowStore struct {
-	cache  *lrucachehashpairtoblockghostdagdatahashpair.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucachehashpairtoblockghostdagdatahashpair.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new BlocksWithTrustedDataDAAWindowStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore {
 	return &daaWindowStore{
-		cache:  lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -11,7 +11,7 @@ type finalityStagingShard struct {
 }
 
 func (fs *finalityStore) stagingShard(stagingArea *model.StagingArea) *finalityStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDFinality, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(fs.shardID, func() model.StagingShard {
 		return &finalityStagingShard{
 			store: fs,
 			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),
@@ -1,25 +1,24 @@
 package finalitystore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("finality-points")
 
 type finalityStore struct {
-	cache  *lrucache.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new FinalityStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.FinalityStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.FinalityStore {
 	return &finalityStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -23,7 +23,7 @@ type ghostdagDataStagingShard struct {
 }
 
 func (gds *ghostdagDataStore) stagingShard(stagingArea *model.StagingArea) *ghostdagDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDGHOSTDAG, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(gds.shardID, func() model.StagingShard {
 		return &ghostdagDataStagingShard{
 			store: gds,
 			toAdd: make(map[key]*externalapi.BlockGHOSTDAGData),
@@ -2,12 +2,11 @@ package ghostdagdatastore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheghostdagdata"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var ghostdagDataBucketName = []byte("block-ghostdag-data")
@@ -15,17 +14,19 @@ var trustedDataBucketName = []byte("block-with-trusted-data-ghostdag-data")
 
 // ghostdagDataStore represents a store of BlockGHOSTDAGData
 type ghostdagDataStore struct {
+	shardID            model.StagingShardID
 	cache              *lrucacheghostdagdata.LRUCache
 	ghostdagDataBucket model.DBBucket
 	trustedDataBucket  model.DBBucket
 }
 
 // New instantiates a new GHOSTDAGDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
 	return &ghostdagDataStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucacheghostdagdata.New(cacheSize, preallocate),
-		ghostdagDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(ghostdagDataBucketName),
-		trustedDataBucket:  database.MakeBucket(prefix.Serialize()).Bucket(trustedDataBucketName),
+		ghostdagDataBucket: prefixBucket.Bucket(ghostdagDataBucketName),
+		trustedDataBucket:  prefixBucket.Bucket(trustedDataBucketName),
 	}
 }
@@ -15,7 +15,7 @@ type headersSelectedChainStagingShard struct {
 }
 
 func (hscs *headersSelectedChainStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedChainStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedChain, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(hscs.shardID, func() model.StagingShard {
 		return &headersSelectedChainStagingShard{
 			store:       hscs,
 			addedByHash: make(map[externalapi.DomainHash]uint64),
@@ -2,7 +2,7 @@ package headersselectedchainstore
 
 import (
 	"encoding/binary"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 
 	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
@@ -18,6 +18,7 @@ var bucketChainBlockIndexByHashName = []byte("chain-block-index-by-hash")
 var highestChainBlockIndexKeyName = []byte("highest-chain-block-index")
 
 type headersSelectedChainStore struct {
+	shardID                     model.StagingShardID
 	cacheByIndex                *lrucacheuint64tohash.LRUCache
 	cacheByHash                 *lrucache.LRUCache
 	cacheHighestChainBlockIndex uint64
@@ -27,13 +28,14 @@ type headersSelectedChainStore struct {
 }
 
 // New instantiates a new HeadersSelectedChainStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
 	return &headersSelectedChainStore{
+		shardID:      staging.GenerateShardingID(),
 		cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate),
 		cacheByHash:  lrucache.New(cacheSize, preallocate),
-		bucketChainBlockHashByIndex: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockHashByIndexName),
-		bucketChainBlockIndexByHash: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockIndexByHashName),
-		highestChainBlockIndexKey:   database.MakeBucket(prefix.Serialize()).Key(highestChainBlockIndexKeyName),
+		bucketChainBlockHashByIndex: prefixBucket.Bucket(bucketChainBlockHashByIndexName),
+		bucketChainBlockIndexByHash: prefixBucket.Bucket(bucketChainBlockIndexByHashName),
+		highestChainBlockIndexKey:   prefixBucket.Key(highestChainBlockIndexKeyName),
 	}
 }
@@ -11,7 +11,7 @@ type headersSelectedTipStagingShard struct {
 }
 
 func (hsts *headerSelectedTipStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedTipStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedTip, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(hsts.shardID, func() model.StagingShard {
 		return &headersSelectedTipStagingShard{
 			store:          hsts,
 			newSelectedTip: nil,
@@ -2,24 +2,25 @@ package headersselectedtipstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var keyName = []byte("headers-selected-tip")
 
 type headerSelectedTipStore struct {
-	cache *externalapi.DomainHash
-	key   model.DBKey
+	shardID model.StagingShardID
+	cache   *externalapi.DomainHash
+	key     model.DBKey
 }
 
 // New instantiates a new HeaderSelectedTipStore
-func New(prefix *prefix.Prefix) model.HeaderSelectedTipStore {
+func New(prefixBucket model.DBBucket) model.HeaderSelectedTipStore {
 	return &headerSelectedTipStore{
-		key: database.MakeBucket(prefix.Serialize()).Key(keyName),
+		shardID: staging.GenerateShardingID(),
+		key:     prefixBucket.Key(keyName),
 	}
 }
@@ -12,7 +12,7 @@ type multisetStagingShard struct {
 }
 
 func (ms *multisetStore) stagingShard(stagingArea *model.StagingArea) *multisetStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDMultiset, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ms.shardID, func() model.StagingShard {
 		return &multisetStagingShard{
 			store: ms,
 			toAdd: make(map[externalapi.DomainHash]model.Multiset),
@@ -2,27 +2,28 @@ package multisetstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("multisets")
 
 // multisetStore represents a store of Multisets
 type multisetStore struct {
-	cache  *lrucache.LRUCache
-	bucket model.DBBucket
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
 }
 
 // New instantiates a new MultisetStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.MultisetStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.MultisetStore {
 	return &multisetStore{
-		cache:  lrucache.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucache.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }
@@ -15,7 +15,7 @@ type pruningStagingShard struct {
 }
 
 func (ps *pruningStore) stagingShard(stagingArea *model.StagingArea) *pruningStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDPruning, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ps.shardID, func() model.StagingShard {
 		return &pruningStagingShard{
 			store:               ps,
 			pruningPointByIndex: map[uint64]*externalapi.DomainHash{},
@@ -9,7 +9,7 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var currentPruningPointIndexKeyName = []byte("pruning-block-index")
@@ -20,6 +20,7 @@ var pruningPointByIndexBucketName = []byte("pruning-point-by-index")
 
 // pruningStore represents a store for the current pruning state
 type pruningStore struct {
+	shardID                       model.StagingShardID
 	pruningPointByIndexCache      *lrucacheuint64tohash.LRUCache
 	currentPruningPointIndexCache *uint64
 	pruningPointCandidateCache    *externalapi.DomainHash
@@ -34,16 +35,17 @@ type pruningStore struct {
 }
 
 // New instantiates a new PruningStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.PruningStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.PruningStore {
 	return &pruningStore{
+		shardID:                  staging.GenerateShardingID(),
 		pruningPointByIndexCache: lrucacheuint64tohash.New(cacheSize, preallocate),
-		currentPruningPointIndexKey:     database.MakeBucket(prefix.Serialize()).Key(currentPruningPointIndexKeyName),
-		candidatePruningPointHashKey:    database.MakeBucket(prefix.Serialize()).Key(candidatePruningPointHashKeyName),
-		pruningPointUTXOSetBucket:       database.MakeBucket(prefix.Serialize()).Bucket(pruningPointUTXOSetBucketName),
-		importedPruningPointUTXOsBucket: database.MakeBucket(prefix.Serialize()).Bucket(importedPruningPointUTXOsBucketName),
-		updatingPruningPointUTXOSetKey:  database.MakeBucket(prefix.Serialize()).Key(updatingPruningPointUTXOSetKeyName),
-		importedPruningPointMultisetKey: database.MakeBucket(prefix.Serialize()).Key(importedPruningPointMultisetKeyName),
-		pruningPointByIndexBucket:       database.MakeBucket(prefix.Serialize()).Bucket(pruningPointByIndexBucketName),
+		currentPruningPointIndexKey:     prefixBucket.Key(currentPruningPointIndexKeyName),
+		candidatePruningPointHashKey:    prefixBucket.Key(candidatePruningPointHashKeyName),
+		pruningPointUTXOSetBucket:       prefixBucket.Bucket(pruningPointUTXOSetBucketName),
+		importedPruningPointUTXOsBucket: prefixBucket.Bucket(importedPruningPointUTXOsBucketName),
+		updatingPruningPointUTXOSetKey:  prefixBucket.Key(updatingPruningPointUTXOSetKeyName),
+		importedPruningPointMultisetKey: prefixBucket.Key(importedPruningPointMultisetKeyName),
+		pruningPointByIndexBucket:       prefixBucket.Bucket(pruningPointByIndexBucketName),
 	}
 }
@@ -12,7 +12,7 @@ type reachabilityDataStagingShard struct {
 }
 
 func (rds *reachabilityDataStore) stagingShard(stagingArea *model.StagingArea) *reachabilityDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDReachabilityData, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(rds.shardID, func() model.StagingShard {
 		return &reachabilityDataStagingShard{
 			store:            rds,
 			reachabilityData: make(map[externalapi.DomainHash]model.ReachabilityData),
@@ -2,12 +2,11 @@ package reachabilitydatastore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var reachabilityDataBucketName = []byte("reachability-data")
@@ -15,6 +14,7 @@ var reachabilityReindexRootKeyName = []byte("reachability-reindex-root")
 
 // reachabilityDataStore represents a store of ReachabilityData
 type reachabilityDataStore struct {
+	shardID                      model.StagingShardID
 	reachabilityDataCache        *lrucache.LRUCache
 	reachabilityReindexRootCache *externalapi.DomainHash
 
@@ -23,11 +23,12 @@ type reachabilityDataStore struct {
 }
 
 // New instantiates a new ReachabilityDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.ReachabilityDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.ReachabilityDataStore {
 	return &reachabilityDataStore{
+		shardID:               staging.GenerateShardingID(),
 		reachabilityDataCache: lrucache.New(cacheSize, preallocate),
-		reachabilityDataBucket:     database.MakeBucket(prefix.Serialize()).Bucket(reachabilityDataBucketName),
-		reachabilityReindexRootKey: database.MakeBucket(prefix.Serialize()).Key(reachabilityReindexRootKeyName),
+		reachabilityDataBucket:     prefixBucket.Bucket(reachabilityDataBucketName),
+		reachabilityReindexRootKey: prefixBucket.Key(reachabilityReindexRootKeyName),
 	}
 }
@@ -13,7 +13,7 @@ type utxoDiffStagingShard struct {
 }
 
 func (uds *utxoDiffStore) stagingShard(stagingArea *model.StagingArea) *utxoDiffStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDUTXODiff, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(uds.shardID, func() model.StagingShard {
 		return &utxoDiffStagingShard{
 			store:         uds,
 			utxoDiffToAdd: make(map[externalapi.DomainHash]externalapi.UTXODiff),
@@ -2,12 +2,11 @@ package utxodiffstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"github.com/pkg/errors"
 )
@@ -16,6 +15,7 @@ var utxoDiffChildBucketName = []byte("utxo-diff-children")
 
 // utxoDiffStore represents a store of UTXODiffs
 type utxoDiffStore struct {
+	shardID            model.StagingShardID
 	utxoDiffCache      *lrucache.LRUCache
 	utxoDiffChildCache *lrucache.LRUCache
 	utxoDiffBucket     model.DBBucket
@@ -23,12 +23,13 @@ type utxoDiffStore struct {
 }
 
 // New instantiates a new UTXODiffStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.UTXODiffStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.UTXODiffStore {
 	return &utxoDiffStore{
+		shardID:            staging.GenerateShardingID(),
 		utxoDiffCache:      lrucache.New(cacheSize, preallocate),
 		utxoDiffChildCache: lrucache.New(cacheSize, preallocate),
-		utxoDiffBucket:      database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffBucketName),
-		utxoDiffChildBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffChildBucketName),
+		utxoDiffBucket:      prefixBucket.Bucket(utxoDiffBucketName),
+		utxoDiffChildBucket: prefixBucket.Bucket(utxoDiffChildBucketName),
 	}
 }
@@ -2,6 +2,7 @@ package consensus
 
 import (
 	"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
+	"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
 	"io/ioutil"
 	"os"
 	"sync"
@@ -103,6 +104,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	externalapi.Consensus, error) {
 
 	dbManager := consensusdatabase.New(db)
+	prefixBucket := consensusdatabase.MakeBucket(dbPrefix.Serialize())
 
 	pruningWindowSizeForCaches := int(config.PruningDepth())
 
@@ -118,24 +120,24 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	pruningWindowSizePlusFinalityDepthForCache := int(config.PruningDepth() + config.FinalityDepth())
 
 	// Data Structures
-	daaWindowStore := daawindowstore.New(dbPrefix, 10_000, preallocateCaches)
-	acceptanceDataStore := acceptancedatastore.New(dbPrefix, 200, preallocateCaches)
-	blockStore, err := blockstore.New(dbManager, dbPrefix, 200, preallocateCaches)
+	daaWindowStore := daawindowstore.New(prefixBucket, 10_000, preallocateCaches)
+	acceptanceDataStore := acceptancedatastore.New(prefixBucket, 200, preallocateCaches)
+	blockStore, err := blockstore.New(dbManager, prefixBucket, 200, preallocateCaches)
 	if err != nil {
 		return nil, err
 	}
-	blockHeaderStore, err := blockheaderstore.New(dbManager, dbPrefix, 10_000, preallocateCaches)
+	blockHeaderStore, err := blockheaderstore.New(dbManager, prefixBucket, 10_000, preallocateCaches)
 	if err != nil {
 		return nil, err
 	}
-	blockRelationStore := blockrelationstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	blockRelationStore := blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
 
-	blockStatusStore := blockstatusstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
-	multisetStore := multisetstore.New(dbPrefix, 200, preallocateCaches)
-	pruningStore := pruningstore.New(dbPrefix, 2, preallocateCaches)
-	reachabilityDataStore := reachabilitydatastore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
-	utxoDiffStore := utxodiffstore.New(dbPrefix, 200, preallocateCaches)
-	consensusStateStore := consensusstatestore.New(dbPrefix, 10_000, preallocateCaches)
+	blockStatusStore := blockstatusstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	multisetStore := multisetstore.New(prefixBucket, 200, preallocateCaches)
+	pruningStore := pruningstore.New(prefixBucket, 2, preallocateCaches)
+	reachabilityDataStore := reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	utxoDiffStore := utxodiffstore.New(prefixBucket, 200, preallocateCaches)
+	consensusStateStore := consensusstatestore.New(prefixBucket, 10_000, preallocateCaches)
 
 	// Some tests artificially decrease the pruningWindowSize, thus making the GhostDagStore cache too small for
 	// a single DifficultyAdjustmentWindow. To alleviate this problem we make sure that the cache size is at least
@@ -144,12 +146,12 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
 		ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
 	}
-	ghostdagDataStore := ghostdagdatastore.New(dbPrefix, ghostdagDataCacheSize, preallocateCaches)
+	ghostdagDataStore := ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
 
-	headersSelectedTipStore := headersselectedtipstore.New(dbPrefix)
-	finalityStore := finalitystore.New(dbPrefix, 200, preallocateCaches)
-	headersSelectedChainStore := headersselectedchainstore.New(dbPrefix, pruningWindowSizeForCaches, preallocateCaches)
-	daaBlocksStore := daablocksstore.New(dbPrefix, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
+	headersSelectedTipStore := headersselectedtipstore.New(prefixBucket)
+	finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches)
+	headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches)
+	daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
 
 	// Processes
 	reachabilityManager := reachabilitymanager.New(
@@ -161,6 +163,13 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		reachabilityManager,
 		blockRelationStore,
 		ghostdagDataStore)
+	blockParentBuilder := blockparentbuilder.New(
+		dbManager,
+		blockHeaderStore,
+		dagTopologyManager,
+		reachabilityDataStore,
+		pruningStore,
+	)
 	ghostdagManager := f.ghostdagConstructor(
 		dbManager,
 		dagTopologyManager,
@@ -316,6 +325,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		mergeDepthManager,
 		reachabilityManager,
 		finalityManager,
+		blockParentBuilder,
 		pruningManager,
 
 		pruningStore,
@@ -355,6 +365,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		ghostdagManager,
 		transactionValidator,
 		finalityManager,
+		blockParentBuilder,
 		pruningManager,
 
 		acceptanceDataStore,
@@ -58,6 +58,7 @@ type BlockHeader interface {
 type BaseBlockHeader interface {
 	Version() uint16
 	Parents() []BlockLevelParents
+	ParentsAtLevel(level int) BlockLevelParents
 	DirectParents() BlockLevelParents
 	HashMerkleRoot() *DomainHash
 	AcceptedIDMerkleRoot() *DomainHash
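ParentsAtLevel is the accessor the new parent-building and validation code relies on; its implementation is not part of this diff. A plausible sketch over a header type that stores its parents as a []BlockLevelParents slice (the field name is assumed here):

// ParentsAtLevel returns the parents the header commits to at `level`.
// Headers only store levels up to their highest occupied one, so any
// level beyond that is treated as having no parents.
func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
	if level >= len(bh.parents) {
		return externalapi.BlockLevelParents{}
	}
	return bh.parents[level]
}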
@@ -6,7 +6,22 @@ type BlockLevelParents []*DomainHash
 
 // Equal returns true if this BlockLevelParents is equal to `other`
 func (sl BlockLevelParents) Equal(other BlockLevelParents) bool {
-	return HashesEqual(sl, other)
+	if len(sl) != len(other) {
+		return false
+	}
+	for _, thisHash := range sl {
+		found := false
+		for _, otherHash := range other {
+			if thisHash.Equal(otherHash) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
 }
 
 // Clone creates a clone of this BlockLevelParents
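The rewritten Equal compares the two lists as unordered sets (plus a length check) rather than requiring identical ordering as HashesEqual did. A sketch of a test capturing the new behavior — the hash values are illustrative:

func TestBlockLevelParentsEqualIgnoresOrder(t *testing.T) {
	hash1 := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
	hash2 := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})

	a := externalapi.BlockLevelParents{hash1, hash2}
	b := externalapi.BlockLevelParents{hash2, hash1}

	// The old order-sensitive comparison would reject this pair;
	// the set-style comparison accepts it.
	if !a.Equal(b) {
		t.Fatal("expected order-insensitive equality")
	}
}

Note that because only membership is checked, lists containing duplicate hashes could compare equal without being identical multisets; parent lists are expected to be duplicate-free (see the "Fix duplicates in newBlockLevelParents" item in the commit message).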
@@ -14,6 +29,16 @@ func (sl BlockLevelParents) Clone() BlockLevelParents {
 	return CloneHashes(sl)
 }
 
+// Contains returns true if this BlockLevelParents contains the given blockHash
+func (sl BlockLevelParents) Contains(blockHash *DomainHash) bool {
+	for _, blockLevelParent := range sl {
+		if blockLevelParent.Equal(blockHash) {
+			return true
+		}
+	}
+	return false
+}
+
 // ParentsEqual returns true if all the BlockLevelParents in `a` and `b` are
 // equal pairwise
 func ParentsEqual(a, b []BlockLevelParents) bool {
@@ -0,0 +1,9 @@
+package model
+
+import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
+// BlockParentBuilder exposes a method to build super-block parents for
+// a given set of direct parents
+type BlockParentBuilder interface {
+	BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
+}
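BuildParents returns one BlockLevelParents entry per level: index 0 carries the direct parents themselves (per the commit note "Explicitly set level 0 blocks to be the same as direct parents"), and higher indexes carry the derived super-block parents that go into the header. A hypothetical call site, with the surrounding helper and variable names assumed:

// logParentLevels is an illustrative helper, not part of the commit.
func logParentLevels(builder model.BlockParentBuilder, stagingArea *model.StagingArea,
	directParents []*externalapi.DomainHash) error {

	parents, err := builder.BuildParents(stagingArea, directParents)
	if err != nil {
		return err
	}
	// Index 0 echoes directParents; each higher index is a derived level.
	for level, levelParents := range parents {
		fmt.Printf("level %d: %d parents\n", level, len(levelParents))
	}
	return nil
}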
@@ -9,29 +9,7 @@ type StagingShard interface {
 }
 
 // StagingShardID is used to identify each of the store's staging shards
-type StagingShardID byte
-
-// StagingShardID constants
-const (
-	StagingShardIDAcceptanceData StagingShardID = iota
-	StagingShardIDBlockHeader
-	StagingShardIDBlockRelation
-	StagingShardIDBlockStatus
-	StagingShardIDBlock
-	StagingShardIDConsensusState
-	StagingShardIDDAABlocks
-	StagingShardIDFinality
-	StagingShardIDGHOSTDAG
-	StagingShardIDHeadersSelectedChain
-	StagingShardIDHeadersSelectedTip
-	StagingShardIDMultiset
-	StagingShardIDPruning
-	StagingShardIDReachabilityData
-	StagingShardIDUTXODiff
-	StagingShardIDDAAWindow
-	// Always leave StagingShardIDLen as the last constant
-	StagingShardIDLen
-)
+type StagingShardID uint64
 
 // StagingArea is a single changeset inside the consensus database, similar to a transaction in a classic database.
 // Each StagingArea consists of multiple StagingShards, one for each dataStore that has any changes within it.
@@ -41,16 +19,14 @@
 // When the StagingArea is being Committed, it goes over all its shards, and commits those one-by-one.
 // Since Commit happens in a DatabaseTransaction, a StagingArea is atomic.
 type StagingArea struct {
-	// shards is deliberately an array and not a map, as an optimization - since it's being read a lot of time, and
-	// reads from maps are relatively slow.
-	shards      [StagingShardIDLen]StagingShard
+	shards      []StagingShard
 	isCommitted bool
 }
 
 // NewStagingArea creates a new, empty staging area.
 func NewStagingArea() *StagingArea {
 	return &StagingArea{
-		shards:      [StagingShardIDLen]StagingShard{},
+		shards:      []StagingShard{},
 		isCommitted: false,
 	}
 }
@@ -58,6 +34,9 @@ func NewStagingArea() *StagingArea {
 // GetOrCreateShard attempts to retrieve a shard with the given name.
 // If it does not exist - a new shard is created using `createFunc`.
 func (sa *StagingArea) GetOrCreateShard(shardID StagingShardID, createFunc func() StagingShard) StagingShard {
+	for uint64(len(sa.shards)) <= uint64(shardID) {
+		sa.shards = append(sa.shards, nil)
+	}
 	if sa.shards[shardID] == nil {
 		sa.shards[shardID] = createFunc()
 	}
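With shard IDs now handed out dynamically, the shards slice is grown on demand with nil placeholders, so lookups stay plain slice indexing — preserving the cheap-read property the old fixed-size array was chosen for. A small illustration, assuming StagingShard's single method is Commit(model.DBTransaction) error:

// dummyShard is a minimal stand-in implementation of model.StagingShard.
type dummyShard struct{}

func (d *dummyShard) Commit(dbTx model.DBTransaction) error { return nil }

func demoGetOrCreateShard() {
	stagingArea := model.NewStagingArea()
	// Suppose staging.GenerateShardingID() handed this store the ID 5:
	// the slice is grown with nils up to index 5 on first access.
	shard := stagingArea.GetOrCreateShard(5, func() model.StagingShard { return &dummyShard{} })
	// A second call with the same ID returns the existing shard
	// without invoking createFunc again.
	same := stagingArea.GetOrCreateShard(5, func() model.StagingShard { return &dummyShard{} })
	_, _ = shard, same
}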
@@ -29,6 +29,7 @@ type blockBuilder struct {
 	transactionValidator model.TransactionValidator
 	finalityManager      model.FinalityManager
 	pruningManager       model.PruningManager
+	blockParentBuilder   model.BlockParentBuilder
 
 	acceptanceDataStore model.AcceptanceDataStore
 	blockRelationStore  model.BlockRelationStore
@@ -49,6 +50,7 @@ func New(
 	ghostdagManager model.GHOSTDAGManager,
 	transactionValidator model.TransactionValidator,
 	finalityManager model.FinalityManager,
+	blockParentBuilder model.BlockParentBuilder,
 	pruningManager model.PruningManager,
 
 	acceptanceDataStore model.AcceptanceDataStore,
@@ -69,6 +71,7 @@ func New(
 		ghostdagManager:      ghostdagManager,
 		transactionValidator: transactionValidator,
 		finalityManager:      finalityManager,
+		blockParentBuilder:   blockParentBuilder,
 		pruningManager:       pruningManager,
 
 		acceptanceDataStore: acceptanceDataStore,
@@ -235,7 +238,7 @@ func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]exter
 	if err != nil {
 		return nil, err
 	}
-	return []externalapi.BlockLevelParents{virtualBlockRelations.Parents}, nil
+	return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
 }
 
 func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {
@@ -12,6 +12,7 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/logger"
 	"github.com/pkg/errors"
 	"math/big"
+	"sort"
 )
 
 type testBlockBuilder struct {
@@ -82,7 +83,16 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
 		return nil, err
 	}
 
-	parents := []externalapi.BlockLevelParents{parentHashes}
+	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, blockLevelParents := range parents {
+		sort.Slice(blockLevelParents, func(i, j int) bool {
+			return blockLevelParents[i].Less(blockLevelParents[j])
+		})
+	}
 
 	bb.nonceCounter++
 	return blockheader.NewImmutableBlockHeader(
@ -0,0 +1,219 @@
|
||||
package blockparentbuilder
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type blockParentBuilder struct {
|
||||
databaseContext model.DBManager
|
||||
blockHeaderStore model.BlockHeaderStore
|
||||
dagTopologyManager model.DAGTopologyManager
|
||||
reachabilityDataStore model.ReachabilityDataStore
|
||||
pruningStore model.PruningStore
|
||||
}
|
||||
|
||||
// New creates a new instance of a BlockParentBuilder
|
||||
func New(
|
||||
databaseContext model.DBManager,
|
||||
blockHeaderStore model.BlockHeaderStore,
|
||||
dagTopologyManager model.DAGTopologyManager,
|
||||
reachabilityDataStore model.ReachabilityDataStore,
|
||||
pruningStore model.PruningStore,
|
||||
) model.BlockParentBuilder {
|
||||
return &blockParentBuilder{
|
||||
databaseContext: databaseContext,
|
||||
blockHeaderStore: blockHeaderStore,
|
||||
dagTopologyManager: dagTopologyManager,
|
||||
reachabilityDataStore: reachabilityDataStore,
|
||||
pruningStore: pruningStore,
|
||||
}
|
||||
}
|
||||
|
||||
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
|
||||
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
|
||||
|
||||
// Late on we'll mutate direct parent hashes, so we first clone it.
|
||||
directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
|
||||
copy(directParentHashesCopy, directParentHashes)
|
||||
|
||||
pruningPoint, err := bpb.pruningStore.PruningPoint(bpb.databaseContext, stagingArea)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The first candidates to be added should be from a parent in the future of the pruning
|
||||
// point, so later on we'll know that every block that doesn't have reachability data
|
||||
// (i.e. pruned) is necessarily in the past of the current candidates and cannot be
|
||||
// considered as a valid candidate.
|
||||
// This is why we sort the direct parent headers in a way that the first one will be
|
||||
// in the future of the pruning point.
|
||||
directParentHeaders := make([]externalapi.BlockHeader, len(directParentHashesCopy))
|
||||
firstParentInFutureOfPruningPointIndex := 0
|
||||
foundFirstParentInFutureOfPruningPoint := false
|
||||
for i, directParentHash := range directParentHashesCopy {
|
||||
isInFutureOfPruningPoint, err := bpb.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, directParentHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isInFutureOfPruningPoint {
|
||||
continue
|
||||
}
|
||||
|
||||
firstParentInFutureOfPruningPointIndex = i
|
||||
foundFirstParentInFutureOfPruningPoint = true
|
||||
break
|
||||
}
|
||||
|
||||
if !foundFirstParentInFutureOfPruningPoint {
|
||||
return nil, errors.New("BuildParents should get at least one parent in the future of the pruning point")
|
||||
}
|
||||
|
||||
oldFirstDirectParent := directParentHashesCopy[0]
|
||||
directParentHashesCopy[0] = directParentHashesCopy[firstParentInFutureOfPruningPointIndex]
|
||||
directParentHashesCopy[firstParentInFutureOfPruningPointIndex] = oldFirstDirectParent
|
||||
|
||||
for i, directParentHash := range directParentHashesCopy {
|
||||
directParentHeader, err := bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, directParentHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directParentHeaders[i] = directParentHeader
|
||||
}
|
||||
|
||||
type blockToReferences map[externalapi.DomainHash][]*externalapi.DomainHash
|
||||
candidatesByLevelToReferenceBlocksMap := make(map[int]blockToReferences)
|
||||
|
||||
// Direct parents are guaranteed to be in one other's anticones so add them all to
|
||||
// all the block levels they occupy
|
||||
for _, directParentHeader := range directParentHeaders {
|
||||
directParentHash := consensushashing.HeaderHash(directParentHeader)
|
||||
proofOfWorkValue := pow.CalculateProofOfWorkValue(directParentHeader.ToMutable())
|
||||
for blockLevel := 0; ; blockLevel++ {
|
||||
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
|
||||
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
|
||||
}
|
||||
candidatesByLevelToReferenceBlocksMap[blockLevel][*directParentHash] = []*externalapi.DomainHash{directParentHash}
|
||||
if proofOfWorkValue.Bit(blockLevel+1) != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
virtualGenesisChildren, err := bpb.dagTopologyManager.Children(stagingArea, model.VirtualGenesisBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtualGenesisChildrenHeaders := make(map[externalapi.DomainHash]externalapi.BlockHeader, len(virtualGenesisChildren))
|
||||
for _, child := range virtualGenesisChildren {
|
||||
virtualGenesisChildrenHeaders[*child], err = bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, directParentHeader := range directParentHeaders {
|
||||
for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
|
||||
isEmptyLevel := false
|
||||
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
|
||||
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
|
||||
isEmptyLevel = true
|
||||
}
|
||||
|
||||
for _, parent := range blockLevelParentsInHeader {
|
||||
hasReachabilityData, err := bpb.reachabilityDataStore.HasReachabilityData(bpb.databaseContext, stagingArea, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Reference blocks are the blocks that are used in reachability queries to check if
|
||||
// a candidate is in the future of another candidate. In most cases this is just the
|
||||
// block itself, but in the case where a block doesn't have reachability data we need
|
||||
// to use some blocks in its future as reference instead.
|
||||
// If we make sure to add a parent in the future of the pruning point first, we can
|
||||
// know that any pruned candidate that is in the past of some blocks in the pruning
|
||||
// point anticone should have should be a parent (in the relevant level) of one of
|
||||
// the virtual genesis children in the pruning point anticone. So we can check which
|
||||
// virtual genesis children have this block as parent and use those block as
|
||||
// reference blocks.
|
||||
var referenceBlocks []*externalapi.DomainHash
|
||||
if hasReachabilityData {
|
||||
referenceBlocks = []*externalapi.DomainHash{parent}
|
||||
} else {
|
||||
for childHash, childHeader := range virtualGenesisChildrenHeaders {
|
||||
childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
|
||||
if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
|
||||
referenceBlocks = append(referenceBlocks, &childHash)
|
||||
}
|
||||
}
|
||||
}

				if isEmptyLevel {
					candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
					continue
				}

				if !hasReachabilityData {
					continue
				}

				toRemove := hashset.New()
				isAncestorOfAnyCandidate := false
				for candidate, candidateReferences := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
					candidate := candidate // Assign to a new pointer to avoid `range` pointer reuse
					isInFutureOfCurrentCandidate, err := bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidateReferences, parent)
					if err != nil {
						return nil, err
					}

					if isInFutureOfCurrentCandidate {
						toRemove.Add(&candidate)
						continue
					}

					if isAncestorOfAnyCandidate {
						continue
					}

					isAncestorOfCurrentCandidate, err := bpb.dagTopologyManager.IsAncestorOfAny(stagingArea, parent, candidateReferences)
					if err != nil {
						return nil, err
					}

					if isAncestorOfCurrentCandidate {
						isAncestorOfAnyCandidate = true
					}
				}

				if toRemove.Length() > 0 {
					for hash := range toRemove {
						delete(candidatesByLevelToReferenceBlocksMap[blockLevel], hash)
					}
				}

				// We should add the block as a candidate if it's in the future of another candidate
				// or in the anticone of all candidates.
				if !isAncestorOfAnyCandidate || toRemove.Length() > 0 {
					candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
				}
			}
		}
	}

	parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
	for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
		levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
		for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
			block := block // Assign to a new pointer to avoid `range` pointer reuse
			levelBlocks = append(levelBlocks, &block)
		}
		parents[blockLevel] = levelBlocks
	}
	return parents, nil
}
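The per-level candidate maintenance above keeps only blocks that are not in the past of another candidate: a new parent displaces every candidate whose references it is in the future of, is skipped when it is an ancestor of a surviving candidate, and is added when it lies in the anticone of all of them. A toy restatement of that rule over plain strings (the isAncestor oracle and all names here are hypothetical, not the kaspad API):

package main

import "fmt"

// updateCandidates restates BuildParents' per-level rule over a toy DAG.
// isAncestor is a hypothetical reachability oracle; candidates is the
// per-level candidate set.
func updateCandidates(candidates map[string]bool, parent string, isAncestor func(a, b string) bool) {
	removedAny := false
	ancestorOfAny := false
	for candidate := range candidates {
		if isAncestor(candidate, parent) {
			// The new parent is in this candidate's future and replaces it.
			delete(candidates, candidate)
			removedAny = true
		} else if isAncestor(parent, candidate) {
			ancestorOfAny = true
		}
	}
	// Add the parent if it replaced a candidate or is in the anticone of all.
	if removedAny || !ancestorOfAny {
		candidates[parent] = true
	}
}

func main() {
	// Toy DAG: B is A's descendant; C is in the anticone of both.
	isAncestor := func(a, b string) bool { return a == "A" && b == "B" }
	candidates := map[string]bool{"A": true}
	updateCandidates(candidates, "B", isAncestor) // B displaces A
	updateCandidates(candidates, "C", isAncestor) // C joins in B's anticone
	fmt.Println(candidates)                       // map[B:true C:true]
}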

@ -26,7 +26,6 @@ func addBlock(tc testapi.TestConsensus, parentHashes []*externalapi.DomainHash,
	}

	blockHash := consensushashing.BlockHash(block)

	_, err = tc.ValidateAndInsertBlock(block, true)
	if err != nil {
		t.Fatalf("ValidateAndInsertBlock: %+v", err)
@ -75,7 +74,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
		t.Fatalf("GetHashesBetween: %+v", err)
	}

	for _, blocksHash := range missingHeaderHashes {
	for i, blocksHash := range missingHeaderHashes {
		blockInfo, err := tcSyncee.GetBlockInfo(blocksHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
@ -92,7 +91,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {

		_, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false)
		if err != nil {
			t.Fatalf("ValidateAndInsertBlock: %+v", err)
			t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err)
		}
	}

@ -68,6 +68,13 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
		}
	}

	if !isBlockWithTrustedData {
		err = v.checkIndirectParents(stagingArea, header)
		if err != nil {
			return err
		}
	}

	err = v.mergeDepthManager.CheckBoundedMergeDepth(stagingArea, blockHash, isBlockWithTrustedData)
	if err != nil {
		return err
@ -183,6 +190,19 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
	return nil
}

func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
	if err != nil {
		return err
	}

	areParentsEqual := externalapi.ParentsEqual(header.Parents(), expectedParents)
	if !areParentsEqual {
		return errors.Wrapf(ruleerrors.ErrUnexpectedParents, "unexpected indirect block parents")
	}
	return nil
}
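checkIndirectParents rebuilds the expected parents from the header's direct parents and rejects the header whenever the claimed levels differ, which is what makes a lying header fail with ErrUnexpectedParents. One plausible shape for the comparison, assuming each level is matched as an unordered set of hashes (the real externalapi.ParentsEqual may differ in details):

package sketch

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// parentsEqual is an assumed sketch, not the actual externalapi code: two
// parents lists match when they have the same number of levels and each
// level holds the same set of hashes, regardless of order.
func parentsEqual(a, b []externalapi.BlockLevelParents) bool {
	if len(a) != len(b) {
		return false
	}
	for level := range a {
		if len(a[level]) != len(b[level]) {
			return false
		}
		seen := make(map[externalapi.DomainHash]struct{}, len(a[level]))
		for _, hash := range a[level] {
			seen[*hash] = struct{}{}
		}
		for _, hash := range b[level] {
			if _, ok := seen[*hash]; !ok {
				return false
			}
		}
	}
	return true
}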

func (v *blockValidator) checkDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
	header externalapi.BlockHeader) error {

@ -35,6 +35,7 @@ type blockValidator struct {
	pruningStore model.PruningStore
	reachabilityManager model.ReachabilityManager
	finalityManager model.FinalityManager
	blockParentBuilder model.BlockParentBuilder
	pruningManager model.PruningManager

	blockStore model.BlockStore
@ -69,6 +70,7 @@ func New(powMax *big.Int,
	mergeDepthManager model.MergeDepthManager,
	reachabilityManager model.ReachabilityManager,
	finalityManager model.FinalityManager,
	blockParentBuilder model.BlockParentBuilder,
	pruningManager model.PruningManager,

	pruningStore model.PruningStore,
@ -104,6 +106,7 @@ func New(powMax *big.Int,
	mergeDepthManager: mergeDepthManager,
	reachabilityManager: reachabilityManager,
	finalityManager: finalityManager,
	blockParentBuilder: blockParentBuilder,
	pruningManager: pruningManager,

	pruningStore: pruningStore,

@ -75,7 +75,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,

	if !exists {
		if !isBlockWithTrustedData {
			return errors.Errorf("only block with prefilled information can have some missing parents")
			return errors.Errorf("direct parent %s is missing: only block with prefilled information can have some missing parents", currentParent)
		}
		continue
	}
@ -132,37 +132,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id: "I",
				expectedWindow: []string{"F", "C", "D", "H", "B", "G"},
				expectedWindow: []string{"F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"I"},
				id: "J",
				expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"},
				expectedWindow: []string{"I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"J"},
				id: "K",
				expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"},
				expectedWindow: []string{"J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"K"},
				id: "L",
				expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"},
				expectedWindow: []string{"K", "J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"L"},
				id: "M",
				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"},
				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"M"},
				id: "N",
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"},
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "H", "D", "B"},
			},
			{
				parents: []string{"N"},
				id: "O",
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "H", "D"},
			},
		},
		dagconfig.DevnetParams.Name: {
@ -184,12 +184,12 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"C", "D"},
				id: "E",
				expectedWindow: []string{"D", "C", "B"},
				expectedWindow: []string{"C", "D", "B"},
			},
			{
				parents: []string{"C", "D"},
				id: "F",
				expectedWindow: []string{"D", "C", "B"},
				expectedWindow: []string{"C", "D", "B"},
			},
			{
				parents: []string{"A"},
@ -204,37 +204,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id: "I",
				expectedWindow: []string{"F", "H", "D", "C", "B", "G"},
				expectedWindow: []string{"F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"I"},
				id: "J",
				expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"},
				expectedWindow: []string{"I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"J"},
				id: "K",
				expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"},
				expectedWindow: []string{"J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"K"},
				id: "L",
				expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"},
				expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"L"},
				id: "M",
				expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"},
				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"M"},
				id: "N",
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"},
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "G"},
			},
			{
				parents: []string{"N"},
				id: "O",
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"},
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
			},
		},
		dagconfig.SimnetParams.Name: {
@ -276,37 +276,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id: "I",
				expectedWindow: []string{"F", "D", "H", "C", "G", "B"},
				expectedWindow: []string{"F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"I"},
				id: "J",
				expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
				expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"J"},
				id: "K",
				expectedWindow: []string{"J", "I", "F", "D", "H", "C", "G", "B"},
				expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"K"},
				id: "L",
				expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "G", "B"},
				expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"L"},
				id: "M",
				expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "G", "B"},
				expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"M"},
				id: "N",
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "G"},
				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"},
			},
			{
				parents: []string{"N"},
				id: "O",
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
			},
		},
	}

@ -37,8 +37,8 @@ func TestPruning(t *testing.T) {
		},
		"dag-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "502",
			dagconfig.TestnetParams.Name: "502",
			dagconfig.DevnetParams.Name: "503",
			dagconfig.TestnetParams.Name: "503",
			dagconfig.DevnetParams.Name: "502",
			dagconfig.SimnetParams.Name: "502",
		},
	}

@ -200,6 +200,8 @@ var (
	//ErrPruningPointViolation indicates that the pruning point isn't in the block past.
	ErrPruningPointViolation = newRuleError("ErrPruningPointViolation")

	ErrUnexpectedParents = newRuleError("ErrUnexpectedParents")

	ErrUnexpectedPruningPoint = newRuleError("ErrUnexpectedPruningPoint")

	ErrInvalidPruningPointsChain = newRuleError("ErrInvalidPruningPointsChain")

@ -56,11 +56,16 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
	return bh.parents
}

func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
	if len(bh.parents) == 0 {
func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
	if len(bh.parents) <= level {
		return externalapi.BlockLevelParents{}
	}
	return bh.parents[0]

	return bh.parents[level]
}

func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
	return bh.ParentsAtLevel(0)
}

func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
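A small sketch of the new accessor's contract: levels beyond what the header carries come back empty, so callers need no bounds check, and DirectParents is now just level 0. The helper below mirrors the method on a bare parents slice; the sample hash is hypothetical:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// parentsAtLevel mirrors blockHeader.ParentsAtLevel on a bare parents slice.
func parentsAtLevel(parents []externalapi.BlockLevelParents, level int) externalapi.BlockLevelParents {
	if len(parents) <= level {
		return externalapi.BlockLevelParents{}
	}
	return parents[level]
}

func main() {
	hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
	parents := []externalapi.BlockLevelParents{{hash}}
	fmt.Println(len(parentsAtLevel(parents, 0))) // 1: the direct parents
	fmt.Println(len(parentsAtLevel(parents, 5))) // 0: out-of-range level is empty
}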

@ -15,7 +15,7 @@ import (
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool {
	// The block pow must be less than the claimed target
	powNum := calcPowValue(header)
	powNum := CalculateProofOfWorkValue(header)

	// The block hash must be less or equal than the claimed target.
	return powNum.Cmp(target) <= 0
@ -27,7 +27,8 @@ func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
	return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits()))
}

func calcPowValue(header externalapi.MutableBlockHeader) *big.Int {
// CalculateProofOfWorkValue hashes the given header and returns its big.Int value
func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int {
	// Zero out the time and nonce.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
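The hunk is cut off here, but the visible part renames calcPowValue to the exported CalculateProofOfWorkValue (so BuildParents can derive parent levels from header PoW) and begins the usual save/zero pattern: the timestamp and nonce are remembered and zeroed before hashing. A guess at the function's remaining shape, with hashHeader as a hypothetical stand-in for the real header-hashing routine, which this hunk does not show:

package powsketch

import (
	"math/big"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// calcPowValueSketch shows the save/zero/hash/restore pattern the visible
// lines begin. The "Zero out the time and nonce" comment above suggests both
// fields are cleared before hashing and restored so the caller's header is
// unchanged; hashHeader is hypothetical.
func calcPowValueSketch(header externalapi.MutableBlockHeader,
	hashHeader func(externalapi.MutableBlockHeader) *externalapi.DomainHash) *big.Int {

	// Zero out the time and nonce, remembering the originals.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
	header.SetNonce(0)

	hash := hashHeader(header)

	// Restore the caller's header before returning.
	header.SetTimeInMilliseconds(timestamp)
	header.SetNonce(nonce)

	return new(big.Int).SetBytes(hash.ByteSlice())
}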
@ -3,6 +3,7 @@ package staging
import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"sync/atomic"
)

// CommitAllChanges creates a transaction in `databaseContext`, and commits all changes in `stagingArea` through it.
@ -22,3 +23,10 @@ func CommitAllChanges(databaseContext model.DBManager, stagingArea *model.Stagin

	return dbTx.Commit()
}

var lastShardingID uint64

// GenerateShardingID generates a unique staging sharding ID.
func GenerateShardingID() model.StagingShardID {
	return model.StagingShardID(atomic.AddUint64(&lastShardingID, 1))
}
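GenerateShardingID replaces the fixed staging-shard ID constants: each store now draws a process-unique ID from an atomic counter at construction time, as the stores earlier in this commit do. A small sketch of why the atomic increment stays unique even when stores are built from several goroutines (a hypothetical demo, not part of the change):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var lastShardingID uint64

// generateShardingID mirrors staging.GenerateShardingID: atomic.AddUint64
// returns the incremented value, so every caller observes a distinct ID,
// with no lock needed.
func generateShardingID() uint64 {
	return atomic.AddUint64(&lastShardingID, 1)
}

func main() {
	ids := make([]uint64, 8)
	var wg sync.WaitGroup
	for i := range ids {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ids[i] = generateShardingID() // one unique ID per store instance
		}(i)
	}
	wg.Wait()
	fmt.Println(ids) // eight distinct, non-zero IDs (assignment order may vary)
}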