Validate each level parents (#1827)

* Create BlockParentBuilder.

* Implement BuildParents.

* Explicitly set level 0 blocks to be the same as direct parents.

* Add checkIndirectParents to validateBlockHeaderInContext.

* Fix test_block_builder.go and BlockLevelParents::Equal.

* Don't check indirect parents for blocks with trusted data.

* Handle pruned blocks when building block level parents.

* Fix bad deletions from unprocessedXxxParents.

* Fix merge errors.

* Fix bad pruning point parent replaces.

* Fix duplicates in newBlockLevelParents.

* Skip checkIndirectParents

* Get rid of staging constant IDs

* Fix BuildParents

* Fix tests

* Add comments

* Change order of directParentHashes

* Get rid of maybeAddDirectParentParents

* Add comments

* Add blockToReferences type

* Use ParentsAtLevel

Co-authored-by: stasatdaglabs <stas@daglabs.com>
Ori Newman 2021-09-13 14:22:00 +03:00 committed by GitHub
parent 0053ee788d
commit afaac28da1
50 changed files with 504 additions and 193 deletions

View File

@@ -12,7 +12,7 @@ type acceptanceDataStagingShard struct {
 }
 
 func (ads *acceptanceDataStore) stagingShard(stagingArea *model.StagingArea) *acceptanceDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDAcceptanceData, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ads.shardID, func() model.StagingShard {
 		return &acceptanceDataStagingShard{
 			store: ads,
 			toAdd: make(map[externalapi.DomainHash]externalapi.AcceptanceData),

View File

@@ -1,12 +1,11 @@
 package acceptancedatastore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"google.golang.org/protobuf/proto"
 )
@@ -14,15 +13,17 @@ var bucketName = []byte("acceptance-data")
 
 // acceptanceDataStore represents a store of AcceptanceData
 type acceptanceDataStore struct {
+	shardID model.StagingShardID
 	cache   *lrucache.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new AcceptanceDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.AcceptanceDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.AcceptanceDataStore {
 	return &acceptanceDataStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -12,7 +12,7 @@ type blockHeaderStagingShard struct {
 }
 
 func (bhs *blockHeaderStore) stagingShard(stagingArea *model.StagingArea) *blockHeaderStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockHeader, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bhs.shardID, func() model.StagingShard {
 		return &blockHeaderStagingShard{
 			store: bhs,
 			toAdd: make(map[externalapi.DomainHash]externalapi.BlockHeader),

View File

@@ -2,12 +2,11 @@ package blockheaderstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-headers")
@@ -15,6 +14,7 @@ var countKeyName = []byte("block-headers-count")
 
 // blockHeaderStore represents a store of blocks
 type blockHeaderStore struct {
+	shardID     model.StagingShardID
 	cache       *lrucache.LRUCache
 	countCached uint64
 	bucket      model.DBBucket
@@ -22,11 +22,12 @@ type blockHeaderStore struct {
 }
 
 // New instantiates a new BlockHeaderStore
-func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
+func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
 	blockHeaderStore := &blockHeaderStore{
+		shardID:  staging.GenerateShardingID(),
 		cache:    lrucache.New(cacheSize, preallocate),
-		bucket:   database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
-		countKey: database.MakeBucket(prefix.Serialize()).Key(countKeyName),
+		bucket:   prefixBucket.Bucket(bucketName),
+		countKey: prefixBucket.Key(countKeyName),
 	}
 
 	err := blockHeaderStore.initializeCount(dbContext)

View File

@@ -11,7 +11,7 @@ type blockRelationStagingShard struct {
 }
 
 func (brs *blockRelationStore) stagingShard(stagingArea *model.StagingArea) *blockRelationStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockRelation, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(brs.shardID, func() model.StagingShard {
 		return &blockRelationStagingShard{
 			store: brs,
 			toAdd: make(map[externalapi.DomainHash]*model.BlockRelations),

View File

@@ -2,27 +2,28 @@ package blockrelationstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-relations")
 
 // blockRelationStore represents a store of BlockRelations
 type blockRelationStore struct {
+	shardID model.StagingShardID
 	cache   *lrucache.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new BlockRelationStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockRelationStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockRelationStore {
 	return &blockRelationStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -11,7 +11,7 @@ type blockStatusStagingShard struct {
 }
 
 func (bss *blockStatusStore) stagingShard(stagingArea *model.StagingArea) *blockStatusStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockStatus, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard {
 		return &blockStatusStagingShard{
 			store: bss,
 			toAdd: make(map[externalapi.DomainHash]externalapi.BlockStatus),

View File

@@ -2,27 +2,28 @@ package blockstatusstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("block-statuses")
 
 // blockStatusStore represents a store of BlockStatuses
 type blockStatusStore struct {
+	shardID model.StagingShardID
 	cache   *lrucache.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new BlockStatusStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockStatusStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockStatusStore {
 	return &blockStatusStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -12,7 +12,7 @@ type blockStagingShard struct {
 }
 
 func (bs *blockStore) stagingShard(stagingArea *model.StagingArea) *blockStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDBlock, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard {
 		return &blockStagingShard{
 			store: bs,
 			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainBlock),

View File

@@ -2,12 +2,11 @@ package blockstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"github.com/pkg/errors"
 )
@@ -15,6 +14,7 @@ var bucketName = []byte("blocks")
 
 // blockStore represents a store of blocks
 type blockStore struct {
+	shardID     model.StagingShardID
 	cache       *lrucache.LRUCache
 	countCached uint64
 	bucket      model.DBBucket
@@ -22,11 +22,12 @@ type blockStore struct {
 }
 
 // New instantiates a new BlockStore
-func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockStore, error) {
+func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockStore, error) {
 	blockStore := &blockStore{
+		shardID:  staging.GenerateShardingID(),
 		cache:    lrucache.New(cacheSize, preallocate),
-		bucket:   database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
-		countKey: database.MakeBucket(prefix.Serialize()).Key([]byte("blocks-count")),
+		bucket:   prefixBucket.Bucket(bucketName),
+		countKey: prefixBucket.Key([]byte("blocks-count")),
 	}
 
 	err := blockStore.initializeCount(dbContext)

View File

@@ -12,7 +12,7 @@ type consensusStateStagingShard struct {
 }
 
 func (bs *consensusStateStore) stagingShard(stagingArea *model.StagingArea) *consensusStateStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDConsensusState, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard {
 		return &consensusStateStagingShard{
 			store:       bs,
 			tipsStaging: nil,

View File

@@ -1,17 +1,17 @@
 package consensusstatestore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/utxolrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var importingPruningPointUTXOSetKeyName = []byte("importing-pruning-point-utxo-set")
 
 // consensusStateStore represents a store for the current consensus state
 type consensusStateStore struct {
+	shardID             model.StagingShardID
 	virtualUTXOSetCache *utxolrucache.LRUCache
 	tipsCache           []*externalapi.DomainHash
 	tipsKey             model.DBKey
@@ -20,12 +20,13 @@ type consensusStateStore struct {
 }
 
 // New instantiates a new ConsensusStateStore
-func New(prefix *prefix.Prefix, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
+func New(prefixBucket model.DBBucket, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
 	return &consensusStateStore{
+		shardID:                         staging.GenerateShardingID(),
 		virtualUTXOSetCache:             utxolrucache.New(utxoSetCacheSize, preallocate),
-		tipsKey:                         database.MakeBucket(prefix.Serialize()).Key(tipsKeyName),
-		importingPruningPointUTXOSetKey: database.MakeBucket(prefix.Serialize()).Key(importingPruningPointUTXOSetKeyName),
-		utxoSetBucket:                   database.MakeBucket(prefix.Serialize()).Bucket(utxoSetBucketName),
+		tipsKey:                         prefixBucket.Key(tipsKeyName),
+		importingPruningPointUTXOSetKey: prefixBucket.Key(importingPruningPointUTXOSetKeyName),
+		utxoSetBucket:                   prefixBucket.Bucket(utxoSetBucketName),
 	}
 }

View File

@@ -15,7 +15,7 @@ type daaBlocksStagingShard struct {
 }
 
 func (daas *daaBlocksStore) stagingShard(stagingArea *model.StagingArea) *daaBlocksStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDDAABlocks, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(daas.shardID, func() model.StagingShard {
 		return &daaBlocksStagingShard{
 			store:         daas,
 			daaScoreToAdd: make(map[externalapi.DomainHash]uint64),

View File

@@ -1,12 +1,11 @@
 package daablocksstore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var daaScoreBucketName = []byte("daa-score")
@@ -14,6 +13,7 @@ var daaAddedBlocksBucketName = []byte("daa-added-blocks")
 
 // daaBlocksStore represents a store of DAABlocksStore
 type daaBlocksStore struct {
+	shardID                model.StagingShardID
 	daaScoreLRUCache       *lrucache.LRUCache
 	daaAddedBlocksLRUCache *lrucache.LRUCache
 	daaScoreBucket         model.DBBucket
@@ -21,12 +21,13 @@ type daaBlocksStore struct {
 }
 
 // New instantiates a new DAABlocksStore
-func New(prefix *prefix.Prefix, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
+func New(prefixBucket model.DBBucket, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
 	return &daaBlocksStore{
+		shardID:                staging.GenerateShardingID(),
 		daaScoreLRUCache:       lrucache.New(daaScoreCacheSize, preallocate),
 		daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate),
-		daaScoreBucket:         database.MakeBucket(prefix.Serialize()).Bucket(daaScoreBucketName),
-		daaAddedBlocksBucket:   database.MakeBucket(prefix.Serialize()).Bucket(daaAddedBlocksBucketName),
+		daaScoreBucket:         prefixBucket.Bucket(daaScoreBucketName),
+		daaAddedBlocksBucket:   prefixBucket.Bucket(daaAddedBlocksBucketName),
 	}
 }

View File

@@ -25,7 +25,7 @@ type daaWindowStagingShard struct {
 }
 
 func (daaws *daaWindowStore) stagingShard(stagingArea *model.StagingArea) *daaWindowStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDDAAWindow, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(daaws.shardID, func() model.StagingShard {
 		return &daaWindowStagingShard{
 			store: daaws,
 			toAdd: make(map[dbKey]*externalapi.BlockGHOSTDAGDataHashPair),

View File

@@ -3,26 +3,27 @@ package daawindowstore
 import (
 	"encoding/binary"
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("daa-window")
 
 type daaWindowStore struct {
+	shardID model.StagingShardID
 	cache   *lrucachehashpairtoblockghostdagdatahashpair.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new BlocksWithTrustedDataDAAWindowStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore {
 	return &daaWindowStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -11,7 +11,7 @@ type finalityStagingShard struct {
 }
 
 func (fs *finalityStore) stagingShard(stagingArea *model.StagingArea) *finalityStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDFinality, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(fs.shardID, func() model.StagingShard {
 		return &finalityStagingShard{
 			store: fs,
 			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),

View File

@@ -1,25 +1,24 @@
 package finalitystore
 
 import (
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
 )
 
 var bucketName = []byte("finality-points")
 
 type finalityStore struct {
+	shardID model.StagingShardID
 	cache   *lrucache.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new FinalityStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.FinalityStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.FinalityStore {
 	return &finalityStore{
 		cache:  lrucache.New(cacheSize, preallocate),
-		bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket: prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -23,7 +23,7 @@ type ghostdagDataStagingShard struct {
 }
 
 func (gds *ghostdagDataStore) stagingShard(stagingArea *model.StagingArea) *ghostdagDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDGHOSTDAG, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(gds.shardID, func() model.StagingShard {
 		return &ghostdagDataStagingShard{
 			store: gds,
 			toAdd: make(map[key]*externalapi.BlockGHOSTDAGData),

View File

@@ -2,12 +2,11 @@ package ghostdagdatastore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheghostdagdata"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var ghostdagDataBucketName = []byte("block-ghostdag-data")
@@ -15,17 +14,19 @@ var trustedDataBucketName = []byte("block-with-trusted-data-ghostdag-data")
 
 // ghostdagDataStore represents a store of BlockGHOSTDAGData
 type ghostdagDataStore struct {
+	shardID            model.StagingShardID
 	cache              *lrucacheghostdagdata.LRUCache
 	ghostdagDataBucket model.DBBucket
 	trustedDataBucket  model.DBBucket
 }
 
 // New instantiates a new GHOSTDAGDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
 	return &ghostdagDataStore{
+		shardID:            staging.GenerateShardingID(),
 		cache:              lrucacheghostdagdata.New(cacheSize, preallocate),
-		ghostdagDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(ghostdagDataBucketName),
-		trustedDataBucket:  database.MakeBucket(prefix.Serialize()).Bucket(trustedDataBucketName),
+		ghostdagDataBucket: prefixBucket.Bucket(ghostdagDataBucketName),
+		trustedDataBucket:  prefixBucket.Bucket(trustedDataBucketName),
 	}
 }

View File

@@ -15,7 +15,7 @@ type headersSelectedChainStagingShard struct {
 }
 
 func (hscs *headersSelectedChainStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedChainStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedChain, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(hscs.shardID, func() model.StagingShard {
 		return &headersSelectedChainStagingShard{
 			store:       hscs,
 			addedByHash: make(map[externalapi.DomainHash]uint64),

View File

@@ -2,7 +2,7 @@ package headersselectedchainstore
 
 import (
 	"encoding/binary"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 
 	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
@@ -18,6 +18,7 @@ var bucketChainBlockIndexByHashName = []byte("chain-block-index-by-hash")
 var highestChainBlockIndexKeyName = []byte("highest-chain-block-index")
 
 type headersSelectedChainStore struct {
+	shardID                     model.StagingShardID
 	cacheByIndex                *lrucacheuint64tohash.LRUCache
 	cacheByHash                 *lrucache.LRUCache
 	cacheHighestChainBlockIndex uint64
@@ -27,13 +28,14 @@ type headersSelectedChainStore struct {
 }
 
 // New instantiates a new HeadersSelectedChainStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
 	return &headersSelectedChainStore{
+		shardID:                     staging.GenerateShardingID(),
 		cacheByIndex:                lrucacheuint64tohash.New(cacheSize, preallocate),
 		cacheByHash:                 lrucache.New(cacheSize, preallocate),
-		bucketChainBlockHashByIndex: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockHashByIndexName),
-		bucketChainBlockIndexByHash: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockIndexByHashName),
-		highestChainBlockIndexKey:   database.MakeBucket(prefix.Serialize()).Key(highestChainBlockIndexKeyName),
+		bucketChainBlockHashByIndex: prefixBucket.Bucket(bucketChainBlockHashByIndexName),
+		bucketChainBlockIndexByHash: prefixBucket.Bucket(bucketChainBlockIndexByHashName),
+		highestChainBlockIndexKey:   prefixBucket.Key(highestChainBlockIndexKeyName),
 	}
 }

View File

@@ -11,7 +11,7 @@ type headersSelectedTipStagingShard struct {
 }
 
 func (hsts *headerSelectedTipStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedTipStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedTip, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(hsts.shardID, func() model.StagingShard {
 		return &headersSelectedTipStagingShard{
 			store:          hsts,
 			newSelectedTip: nil,

View File

@@ -2,24 +2,25 @@ package headersselectedtipstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var keyName = []byte("headers-selected-tip")
 
 type headerSelectedTipStore struct {
+	shardID model.StagingShardID
 	cache   *externalapi.DomainHash
 	key     model.DBKey
 }
 
 // New instantiates a new HeaderSelectedTipStore
-func New(prefix *prefix.Prefix) model.HeaderSelectedTipStore {
+func New(prefixBucket model.DBBucket) model.HeaderSelectedTipStore {
 	return &headerSelectedTipStore{
-		key: database.MakeBucket(prefix.Serialize()).Key(keyName),
+		shardID: staging.GenerateShardingID(),
+		key:     prefixBucket.Key(keyName),
 	}
 }

View File

@@ -12,7 +12,7 @@ type multisetStagingShard struct {
 }
 
 func (ms *multisetStore) stagingShard(stagingArea *model.StagingArea) *multisetStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDMultiset, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ms.shardID, func() model.StagingShard {
 		return &multisetStagingShard{
 			store: ms,
 			toAdd: make(map[externalapi.DomainHash]model.Multiset),

View File

@@ -2,27 +2,28 @@ package multisetstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var bucketName = []byte("multisets")
 
 // multisetStore represents a store of Multisets
 type multisetStore struct {
+	shardID model.StagingShardID
 	cache   *lrucache.LRUCache
 	bucket  model.DBBucket
 }
 
 // New instantiates a new MultisetStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.MultisetStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.MultisetStore {
 	return &multisetStore{
+		shardID: staging.GenerateShardingID(),
 		cache:   lrucache.New(cacheSize, preallocate),
-		bucket:  database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
+		bucket:  prefixBucket.Bucket(bucketName),
 	}
 }

View File

@@ -15,7 +15,7 @@ type pruningStagingShard struct {
 }
 
 func (ps *pruningStore) stagingShard(stagingArea *model.StagingArea) *pruningStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDPruning, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(ps.shardID, func() model.StagingShard {
 		return &pruningStagingShard{
 			store:               ps,
 			pruningPointByIndex: map[uint64]*externalapi.DomainHash{},

View File

@@ -9,7 +9,7 @@ import (
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var currentPruningPointIndexKeyName = []byte("pruning-block-index")
@@ -20,6 +20,7 @@ var pruningPointByIndexBucketName = []byte("pruning-point-by-index")
 
 // pruningStore represents a store for the current pruning state
 type pruningStore struct {
+	shardID                       model.StagingShardID
 	pruningPointByIndexCache      *lrucacheuint64tohash.LRUCache
 	currentPruningPointIndexCache *uint64
 	pruningPointCandidateCache    *externalapi.DomainHash
@@ -34,16 +35,17 @@ type pruningStore struct {
 }
 
 // New instantiates a new PruningStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.PruningStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.PruningStore {
 	return &pruningStore{
+		shardID:                         staging.GenerateShardingID(),
 		pruningPointByIndexCache:        lrucacheuint64tohash.New(cacheSize, preallocate),
-		currentPruningPointIndexKey:     database.MakeBucket(prefix.Serialize()).Key(currentPruningPointIndexKeyName),
-		candidatePruningPointHashKey:    database.MakeBucket(prefix.Serialize()).Key(candidatePruningPointHashKeyName),
-		pruningPointUTXOSetBucket:       database.MakeBucket(prefix.Serialize()).Bucket(pruningPointUTXOSetBucketName),
-		importedPruningPointUTXOsBucket: database.MakeBucket(prefix.Serialize()).Bucket(importedPruningPointUTXOsBucketName),
-		updatingPruningPointUTXOSetKey:  database.MakeBucket(prefix.Serialize()).Key(updatingPruningPointUTXOSetKeyName),
-		importedPruningPointMultisetKey: database.MakeBucket(prefix.Serialize()).Key(importedPruningPointMultisetKeyName),
-		pruningPointByIndexBucket:       database.MakeBucket(prefix.Serialize()).Bucket(pruningPointByIndexBucketName),
+		currentPruningPointIndexKey:     prefixBucket.Key(currentPruningPointIndexKeyName),
+		candidatePruningPointHashKey:    prefixBucket.Key(candidatePruningPointHashKeyName),
+		pruningPointUTXOSetBucket:       prefixBucket.Bucket(pruningPointUTXOSetBucketName),
+		importedPruningPointUTXOsBucket: prefixBucket.Bucket(importedPruningPointUTXOsBucketName),
+		updatingPruningPointUTXOSetKey:  prefixBucket.Key(updatingPruningPointUTXOSetKeyName),
+		importedPruningPointMultisetKey: prefixBucket.Key(importedPruningPointMultisetKeyName),
+		pruningPointByIndexBucket:       prefixBucket.Bucket(pruningPointByIndexBucketName),
 	}
 }

View File

@@ -12,7 +12,7 @@ type reachabilityDataStagingShard struct {
 }
 
 func (rds *reachabilityDataStore) stagingShard(stagingArea *model.StagingArea) *reachabilityDataStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDReachabilityData, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(rds.shardID, func() model.StagingShard {
 		return &reachabilityDataStagingShard{
 			store:            rds,
 			reachabilityData: make(map[externalapi.DomainHash]model.ReachabilityData),

View File

@@ -2,12 +2,11 @@ package reachabilitydatastore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 )
 
 var reachabilityDataBucketName = []byte("reachability-data")
@@ -15,6 +14,7 @@ var reachabilityReindexRootKeyName = []byte("reachability-reindex-root")
 
 // reachabilityDataStore represents a store of ReachabilityData
 type reachabilityDataStore struct {
+	shardID                      model.StagingShardID
 	reachabilityDataCache        *lrucache.LRUCache
 	reachabilityReindexRootCache *externalapi.DomainHash
@@ -23,11 +23,12 @@ type reachabilityDataStore struct {
 }
 
 // New instantiates a new ReachabilityDataStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.ReachabilityDataStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.ReachabilityDataStore {
 	return &reachabilityDataStore{
+		shardID:                    staging.GenerateShardingID(),
 		reachabilityDataCache:      lrucache.New(cacheSize, preallocate),
-		reachabilityDataBucket:     database.MakeBucket(prefix.Serialize()).Bucket(reachabilityDataBucketName),
-		reachabilityReindexRootKey: database.MakeBucket(prefix.Serialize()).Key(reachabilityReindexRootKeyName),
+		reachabilityDataBucket:     prefixBucket.Bucket(reachabilityDataBucketName),
+		reachabilityReindexRootKey: prefixBucket.Key(reachabilityReindexRootKeyName),
 	}
 }

View File

@@ -13,7 +13,7 @@ type utxoDiffStagingShard struct {
 }
 
 func (uds *utxoDiffStore) stagingShard(stagingArea *model.StagingArea) *utxoDiffStagingShard {
-	return stagingArea.GetOrCreateShard(model.StagingShardIDUTXODiff, func() model.StagingShard {
+	return stagingArea.GetOrCreateShard(uds.shardID, func() model.StagingShard {
 		return &utxoDiffStagingShard{
 			store:         uds,
 			utxoDiffToAdd: make(map[externalapi.DomainHash]externalapi.UTXODiff),

View File

@@ -2,12 +2,11 @@ package utxodiffstore
 
 import (
 	"github.com/golang/protobuf/proto"
-	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
-	"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
+	"github.com/kaspanet/kaspad/util/staging"
 	"github.com/pkg/errors"
 )
@@ -16,6 +15,7 @@ var utxoDiffChildBucketName = []byte("utxo-diff-children")
 
 // utxoDiffStore represents a store of UTXODiffs
 type utxoDiffStore struct {
+	shardID            model.StagingShardID
 	utxoDiffCache      *lrucache.LRUCache
 	utxoDiffChildCache *lrucache.LRUCache
 	utxoDiffBucket     model.DBBucket
@@ -23,12 +23,13 @@ type utxoDiffStore struct {
 }
 
 // New instantiates a new UTXODiffStore
-func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.UTXODiffStore {
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.UTXODiffStore {
 	return &utxoDiffStore{
+		shardID:             staging.GenerateShardingID(),
 		utxoDiffCache:       lrucache.New(cacheSize, preallocate),
 		utxoDiffChildCache:  lrucache.New(cacheSize, preallocate),
-		utxoDiffBucket:      database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffBucketName),
-		utxoDiffChildBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffChildBucketName),
+		utxoDiffBucket:      prefixBucket.Bucket(utxoDiffBucketName),
+		utxoDiffChildBucket: prefixBucket.Bucket(utxoDiffChildBucketName),
	}
 }

View File

@@ -2,6 +2,7 @@ package consensus
 
 import (
 	"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
+	"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
 	"io/ioutil"
 	"os"
 	"sync"
@@ -103,6 +104,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	externalapi.Consensus, error) {
 
 	dbManager := consensusdatabase.New(db)
+	prefixBucket := consensusdatabase.MakeBucket(dbPrefix.Serialize())
 
 	pruningWindowSizeForCaches := int(config.PruningDepth())
@@ -118,24 +120,24 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	pruningWindowSizePlusFinalityDepthForCache := int(config.PruningDepth() + config.FinalityDepth())
 
 	// Data Structures
-	daaWindowStore := daawindowstore.New(dbPrefix, 10_000, preallocateCaches)
-	acceptanceDataStore := acceptancedatastore.New(dbPrefix, 200, preallocateCaches)
-	blockStore, err := blockstore.New(dbManager, dbPrefix, 200, preallocateCaches)
+	daaWindowStore := daawindowstore.New(prefixBucket, 10_000, preallocateCaches)
+	acceptanceDataStore := acceptancedatastore.New(prefixBucket, 200, preallocateCaches)
+	blockStore, err := blockstore.New(dbManager, prefixBucket, 200, preallocateCaches)
 	if err != nil {
 		return nil, err
 	}
-	blockHeaderStore, err := blockheaderstore.New(dbManager, dbPrefix, 10_000, preallocateCaches)
+	blockHeaderStore, err := blockheaderstore.New(dbManager, prefixBucket, 10_000, preallocateCaches)
 	if err != nil {
 		return nil, err
 	}
-	blockRelationStore := blockrelationstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
-	blockStatusStore := blockstatusstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
-	multisetStore := multisetstore.New(dbPrefix, 200, preallocateCaches)
-	pruningStore := pruningstore.New(dbPrefix, 2, preallocateCaches)
-	reachabilityDataStore := reachabilitydatastore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
-	utxoDiffStore := utxodiffstore.New(dbPrefix, 200, preallocateCaches)
-	consensusStateStore := consensusstatestore.New(dbPrefix, 10_000, preallocateCaches)
+	blockRelationStore := blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	blockStatusStore := blockstatusstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	multisetStore := multisetstore.New(prefixBucket, 200, preallocateCaches)
+	pruningStore := pruningstore.New(prefixBucket, 2, preallocateCaches)
+	reachabilityDataStore := reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
+	utxoDiffStore := utxodiffstore.New(prefixBucket, 200, preallocateCaches)
+	consensusStateStore := consensusstatestore.New(prefixBucket, 10_000, preallocateCaches)
 
 	// Some tests artificially decrease the pruningWindowSize, thus making the GhostDagStore cache too small for a
 	// a single DifficultyAdjustmentWindow. To alleviate this problem we make sure that the cache size is at least
@@ -144,12 +146,12 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 	if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
 		ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
 	}
-	ghostdagDataStore := ghostdagdatastore.New(dbPrefix, ghostdagDataCacheSize, preallocateCaches)
-	headersSelectedTipStore := headersselectedtipstore.New(dbPrefix)
-	finalityStore := finalitystore.New(dbPrefix, 200, preallocateCaches)
-	headersSelectedChainStore := headersselectedchainstore.New(dbPrefix, pruningWindowSizeForCaches, preallocateCaches)
-	daaBlocksStore := daablocksstore.New(dbPrefix, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
+	ghostdagDataStore := ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
+	headersSelectedTipStore := headersselectedtipstore.New(prefixBucket)
+	finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches)
+	headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches)
+	daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
 
 	// Processes
 	reachabilityManager := reachabilitymanager.New(
@@ -161,6 +163,13 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		reachabilityManager,
 		blockRelationStore,
 		ghostdagDataStore)
+	blockParentBuilder := blockparentbuilder.New(
+		dbManager,
+		blockHeaderStore,
+		dagTopologyManager,
+		reachabilityDataStore,
+		pruningStore,
+	)
 	ghostdagManager := f.ghostdagConstructor(
 		dbManager,
 		dagTopologyManager,
@@ -316,6 +325,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		mergeDepthManager,
 		reachabilityManager,
 		finalityManager,
+		blockParentBuilder,
 		pruningManager,
 
 		pruningStore,
@@ -355,6 +365,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
 		ghostdagManager,
 		transactionValidator,
 		finalityManager,
+		blockParentBuilder,
 		pruningManager,
 
 		acceptanceDataStore,

View File

@@ -58,6 +58,7 @@ type BlockHeader interface {
 type BaseBlockHeader interface {
 	Version() uint16
 	Parents() []BlockLevelParents
+	ParentsAtLevel(level int) BlockLevelParents
 	DirectParents() BlockLevelParents
 	HashMerkleRoot() *DomainHash
 	AcceptedIDMerkleRoot() *DomainHash
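
The new ParentsAtLevel accessor gives validation code a way to compare a header's claimed parents at a given level against locally rebuilt ones. As a rough illustration of the level-0 rule from the commit message ("level 0 blocks ... the same as direct parents") — the helper below is hypothetical and not part of this diff; the actual check is the checkIndirectParents step added to validateBlockHeaderInContext, which this excerpt does not show:

// Hypothetical sketch, not from this commit: a header's level-0
// parents are expected to equal its direct parents.
// `errors` here is github.com/pkg/errors, as used elsewhere in kaspad.
func validateLevelZeroParents(header externalapi.BlockHeader) error {
	if !header.ParentsAtLevel(0).Equal(header.DirectParents()) {
		return errors.New("the level-0 parents must equal the direct parents")
	}
	return nil
}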

View File

@@ -6,7 +6,22 @@ type BlockLevelParents []*DomainHash
 
 // Equal returns true if this BlockLevelParents is equal to `other`
 func (sl BlockLevelParents) Equal(other BlockLevelParents) bool {
-	return HashesEqual(sl, other)
+	if len(sl) != len(other) {
+		return false
+	}
+	for _, thisHash := range sl {
+		found := false
+		for _, otherHash := range other {
+			if thisHash.Equal(otherHash) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
 }
 
 // Clone creates a clone of this BlockLevelParents
@@ -14,6 +29,16 @@ func (sl BlockLevelParents) Clone() BlockLevelParents {
 	return CloneHashes(sl)
 }
 
+// Contains returns true if this BlockLevelParents contains the given blockHash
+func (sl BlockLevelParents) Contains(blockHash *DomainHash) bool {
+	for _, blockLevelParent := range sl {
+		if blockLevelParent.Equal(blockHash) {
+			return true
+		}
+	}
+	return false
+}
+
 // ParentsEqual returns true if all the BlockLevelParents in `a` and `b` are
 // equal pairwise
 func ParentsEqual(a, b []BlockLevelParents) bool {
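
Note that the rewritten Equal compares the two hash lists as sets, ignoring order (the nested loop is quadratic, which is acceptable for short per-level parent lists), whereas the old HashesEqual compared element by element. A small usage sketch; it assumes externalapi's NewDomainHashFromByteArray constructor and DomainHashSize constant, which exist elsewhere in the package and are not part of this diff:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func main() {
	a := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})
	b := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})

	x := externalapi.BlockLevelParents{a, b}
	y := externalapi.BlockLevelParents{b, a}

	fmt.Println(x.Equal(y))    // true: ordering is ignored
	fmt.Println(x.Contains(a)) // true
}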

View File

@@ -0,0 +1,9 @@
+package model
+
+import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+
+// BlockParentBuilder exposes a method to build super-block parents for
+// a given set of direct parents
+type BlockParentBuilder interface {
+	BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
+}

View File

@@ -9,29 +9,7 @@ type StagingShard interface {
 }
 
 // StagingShardID is used to identify each of the store's staging shards
-type StagingShardID byte
-
-// StagingShardID constants
-const (
-	StagingShardIDAcceptanceData StagingShardID = iota
-	StagingShardIDBlockHeader
-	StagingShardIDBlockRelation
-	StagingShardIDBlockStatus
-	StagingShardIDBlock
-	StagingShardIDConsensusState
-	StagingShardIDDAABlocks
-	StagingShardIDFinality
-	StagingShardIDGHOSTDAG
-	StagingShardIDHeadersSelectedChain
-	StagingShardIDHeadersSelectedTip
-	StagingShardIDMultiset
-	StagingShardIDPruning
-	StagingShardIDReachabilityData
-	StagingShardIDUTXODiff
-	StagingShardIDDAAWindow
-
-	// Always leave StagingShardIDLen as the last constant
-	StagingShardIDLen
-)
+type StagingShardID uint64
 
 // StagingArea is a single changeset inside the consensus database, similar to a transaction in a classic database.
 // Each StagingArea consists of multiple StagingShards, one for each dataStore that has any changes within it.
@@ -41,16 +19,14 @@ const (
 // When the StagingArea is being Committed, it goes over all its shards, and commits those one-by-one.
 // Since Commit happens in a DatabaseTransaction, a StagingArea is atomic.
 type StagingArea struct {
-	// shards is deliberately an array and not a map, as an optimization - since it's being read a lot of time, and
-	// reads from maps are relatively slow.
-	shards      [StagingShardIDLen]StagingShard
+	shards      []StagingShard
 	isCommitted bool
 }
 
 // NewStagingArea creates a new, empty staging area.
 func NewStagingArea() *StagingArea {
 	return &StagingArea{
-		shards:      [StagingShardIDLen]StagingShard{},
+		shards:      []StagingShard{},
 		isCommitted: false,
 	}
 }
@@ -58,6 +34,9 @@ func NewStagingArea() *StagingArea {
 
 // GetOrCreateShard attempts to retrieve a shard with the given name.
 // If it does not exist - a new shard is created using `createFunc`.
 func (sa *StagingArea) GetOrCreateShard(shardID StagingShardID, createFunc func() StagingShard) StagingShard {
+	for uint64(len(sa.shards)) <= uint64(shardID) {
+		sa.shards = append(sa.shards, nil)
+	}
 	if sa.shards[shardID] == nil {
 		sa.shards[shardID] = createFunc()
 	}
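
The util/staging package that replaces the deleted enum is not shown in this excerpt. Each store calls staging.GenerateShardingID() exactly once at construction, and GetOrCreateShard above grows the shards slice to fit any ID, so the generator presumably just hands out sequential IDs from a process-wide atomic counter; a minimal sketch under that assumption:

package staging

import (
	"sync/atomic"

	"github.com/kaspanet/kaspad/domain/consensus/model"
)

// lastShardingID backs a process-wide counter. Sequential IDs keep the
// StagingArea's shards slice small and dense.
var lastShardingID uint64

// GenerateShardingID returns a unique model.StagingShardID.
// (Sketch only: the real implementation lives in util/staging and is not
// part of this excerpt.)
func GenerateShardingID() model.StagingShardID {
	return model.StagingShardID(atomic.AddUint64(&lastShardingID, 1))
}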

View File

@@ -29,6 +29,7 @@ type blockBuilder struct {
 	transactionValidator model.TransactionValidator
 	finalityManager      model.FinalityManager
 	pruningManager       model.PruningManager
+	blockParentBuilder   model.BlockParentBuilder
 
 	acceptanceDataStore model.AcceptanceDataStore
 	blockRelationStore  model.BlockRelationStore
@@ -49,6 +50,7 @@ func New(
 	ghostdagManager model.GHOSTDAGManager,
 	transactionValidator model.TransactionValidator,
 	finalityManager model.FinalityManager,
+	blockParentBuilder model.BlockParentBuilder,
 	pruningManager model.PruningManager,
 
 	acceptanceDataStore model.AcceptanceDataStore,
@@ -69,6 +71,7 @@ func New(
 		ghostdagManager:      ghostdagManager,
 		transactionValidator: transactionValidator,
 		finalityManager:      finalityManager,
+		blockParentBuilder:   blockParentBuilder,
 		pruningManager:       pruningManager,
 
 		acceptanceDataStore: acceptanceDataStore,
@@ -235,7 +238,7 @@ func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]exter
 	if err != nil {
 		return nil, err
 	}
-	return []externalapi.BlockLevelParents{virtualBlockRelations.Parents}, nil
+	return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
 }
 
 func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {
func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) { func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {
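newBlockParents now delegates the whole multi-level computation to the new model.BlockParentBuilder dependency. Judging by the call sites here and in the validator further down, its surface is a single method; a sketch of the interface as it presumably appears in the model package (the doc comment is our own paraphrase):

package model

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// BlockParentBuilder builds the full list of parents for each block level
// out of a block's direct (level-0) parents.
type BlockParentBuilder interface {
	BuildParents(stagingArea *StagingArea,
		directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
}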

View File

@@ -12,6 +12,7 @@ import (
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/pkg/errors"
	"math/big"
+	"sort"
)

type testBlockBuilder struct {
@@ -82,7 +83,16 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
		return nil, err
	}

-	parents := []externalapi.BlockLevelParents{parentHashes}
+	parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, blockLevelParents := range parents {
+		sort.Slice(blockLevelParents, func(i, j int) bool {
+			return blockLevelParents[i].Less(blockLevelParents[j])
+		})
+	}

	bb.nonceCounter++
	return blockheader.NewImmutableBlockHeader(

View File

@@ -0,0 +1,219 @@
package blockparentbuilder
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/pkg/errors"
)
type blockParentBuilder struct {
databaseContext model.DBManager
blockHeaderStore model.BlockHeaderStore
dagTopologyManager model.DAGTopologyManager
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
}
// New creates a new instance of a BlockParentBuilder
func New(
databaseContext model.DBManager,
blockHeaderStore model.BlockHeaderStore,
dagTopologyManager model.DAGTopologyManager,
reachabilityDataStore model.ReachabilityDataStore,
pruningStore model.PruningStore,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
}
}
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
// Later on we'll mutate the direct parent hashes, so we clone them first.
directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
copy(directParentHashesCopy, directParentHashes)
pruningPoint, err := bpb.pruningStore.PruningPoint(bpb.databaseContext, stagingArea)
if err != nil {
return nil, err
}
// The first candidates to be added should be from a parent in the future of the pruning
// point, so later on we'll know that every block that doesn't have reachability data
// (i.e. pruned) is necessarily in the past of the current candidates and cannot be
// considered as a valid candidate.
// This is why we reorder the direct parent headers so that the first one is
// in the future of the pruning point.
directParentHeaders := make([]externalapi.BlockHeader, len(directParentHashesCopy))
firstParentInFutureOfPruningPointIndex := 0
foundFirstParentInFutureOfPruningPoint := false
for i, directParentHash := range directParentHashesCopy {
isInFutureOfPruningPoint, err := bpb.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, directParentHash)
if err != nil {
return nil, err
}
if !isInFutureOfPruningPoint {
continue
}
firstParentInFutureOfPruningPointIndex = i
foundFirstParentInFutureOfPruningPoint = true
break
}
if !foundFirstParentInFutureOfPruningPoint {
return nil, errors.New("BuildParents should get at least one parent in the future of the pruning point")
}
oldFirstDirectParent := directParentHashesCopy[0]
directParentHashesCopy[0] = directParentHashesCopy[firstParentInFutureOfPruningPointIndex]
directParentHashesCopy[firstParentInFutureOfPruningPointIndex] = oldFirstDirectParent
for i, directParentHash := range directParentHashesCopy {
directParentHeader, err := bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, directParentHash)
if err != nil {
return nil, err
}
directParentHeaders[i] = directParentHeader
}
type blockToReferences map[externalapi.DomainHash][]*externalapi.DomainHash
candidatesByLevelToReferenceBlocksMap := make(map[int]blockToReferences)
// Direct parents are guaranteed to be in one another's anticone, so add each
// of them to every block level it occupies.
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
proofOfWorkValue := pow.CalculateProofOfWorkValue(directParentHeader.ToMutable())
for blockLevel := 0; ; blockLevel++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
}
candidatesByLevelToReferenceBlocksMap[blockLevel][*directParentHash] = []*externalapi.DomainHash{directParentHash}
if proofOfWorkValue.Bit(blockLevel+1) != 0 {
break
}
}
}
virtualGenesisChildren, err := bpb.dagTopologyManager.Children(stagingArea, model.VirtualGenesisBlockHash)
if err != nil {
return nil, err
}
virtualGenesisChildrenHeaders := make(map[externalapi.DomainHash]externalapi.BlockHeader, len(virtualGenesisChildren))
for _, child := range virtualGenesisChildren {
virtualGenesisChildrenHeaders[*child], err = bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, child)
if err != nil {
return nil, err
}
}
for _, directParentHeader := range directParentHeaders {
for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
isEmptyLevel := false
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
isEmptyLevel = true
}
for _, parent := range blockLevelParentsInHeader {
hasReachabilityData, err := bpb.reachabilityDataStore.HasReachabilityData(bpb.databaseContext, stagingArea, parent)
if err != nil {
return nil, err
}
// Reference blocks are the blocks that are used in reachability queries to check if
// a candidate is in the future of another candidate. In most cases this is just the
// block itself, but in the case where a block doesn't have reachability data we need
// to use some blocks in its future as references instead.
// Since we made sure to add a parent in the future of the pruning point first, we
// know that any pruned candidate in the past of some blocks in the pruning point
// anticone must be a parent (at the relevant level) of one of the virtual genesis
// children in the pruning point anticone. So we can check which virtual genesis
// children have this block as a parent and use those blocks as
// references.
var referenceBlocks []*externalapi.DomainHash
if hasReachabilityData {
referenceBlocks = []*externalapi.DomainHash{parent}
} else {
for childHash, childHeader := range virtualGenesisChildrenHeaders {
childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
referenceBlocks = append(referenceBlocks, &childHash)
}
}
}
if isEmptyLevel {
candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
continue
}
if !hasReachabilityData {
continue
}
toRemove := hashset.New()
isAncestorOfAnyCandidate := false
for candidate, candidateReferences := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
candidate := candidate // Assign to a new pointer to avoid `range` pointer reuse
isInFutureOfCurrentCandidate, err := bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidateReferences, parent)
if err != nil {
return nil, err
}
if isInFutureOfCurrentCandidate {
toRemove.Add(&candidate)
continue
}
if isAncestorOfAnyCandidate {
continue
}
isAncestorOfCurrentCandidate, err := bpb.dagTopologyManager.IsAncestorOfAny(stagingArea, parent, candidateReferences)
if err != nil {
return nil, err
}
if isAncestorOfCurrentCandidate {
isAncestorOfAnyCandidate = true
}
}
if toRemove.Length() > 0 {
for hash := range toRemove {
delete(candidatesByLevelToReferenceBlocksMap[blockLevel], hash)
}
}
// We should add the block as a candidate if it's in the future of another candidate
// or in the anticone of all candidates.
if !isAncestorOfAnyCandidate || toRemove.Length() > 0 {
candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks
}
}
}
}
parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
block := block // Assign to a new pointer to avoid `range` pointer reuse
levelBlocks = append(levelBlocks, &block)
}
parents[blockLevel] = levelBlocks
}
return parents, nil
}
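The level loop near the top of BuildParents adds each direct parent at level 0 and keeps climbing while the low-order bits of its proof-of-work value are zero. A self-contained sketch of just that arithmetic; `maxBlockLevel` is our name, not the repo's, and it assumes some bit above bit 0 is set, which is practically guaranteed for real proof-of-work values:

package main

import (
	"fmt"
	"math/big"
)

// maxBlockLevel mirrors the loop in BuildParents: a block occupies levels
// 0 through k, where k is the first level for which bit k+1 of the
// proof-of-work value is set.
func maxBlockLevel(proofOfWorkValue *big.Int) int {
	blockLevel := 0
	for proofOfWorkValue.Bit(blockLevel+1) == 0 {
		blockLevel++
	}
	return blockLevel
}

func main() {
	fmt.Println(maxBlockLevel(big.NewInt(0b10000))) // bits 1-3 are zero, bit 4 is set: prints 3
	fmt.Println(maxBlockLevel(big.NewInt(0b10)))    // bit 1 is set: prints 0
}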

View File

@@ -26,7 +26,6 @@ func addBlock(tc testapi.TestConsensus, parentHashes []*externalapi.DomainHash,
	}
	blockHash := consensushashing.BlockHash(block)

	_, err = tc.ValidateAndInsertBlock(block, true)
	if err != nil {
		t.Fatalf("ValidateAndInsertBlock: %+v", err)
@@ -75,7 +74,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
		t.Fatalf("GetHashesBetween: %+v", err)
	}

-	for _, blocksHash := range missingHeaderHashes {
+	for i, blocksHash := range missingHeaderHashes {
		blockInfo, err := tcSyncee.GetBlockInfo(blocksHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
@@ -92,7 +91,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
		_, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false)
		if err != nil {
-			t.Fatalf("ValidateAndInsertBlock: %+v", err)
+			t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err)
		}
	}

View File

@@ -68,6 +68,13 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
		}
	}

+	if !isBlockWithTrustedData {
+		err = v.checkIndirectParents(stagingArea, header)
+		if err != nil {
+			return err
+		}
+	}
+
	err = v.mergeDepthManager.CheckBoundedMergeDepth(stagingArea, blockHash, isBlockWithTrustedData)
	if err != nil {
		return err
@@ -183,6 +190,19 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
	return nil
}

+func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
+	expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
+	if err != nil {
+		return err
+	}
+
+	areParentsEqual := externalapi.ParentsEqual(header.Parents(), expectedParents)
+	if !areParentsEqual {
+		return errors.Wrapf(ruleerrors.ErrUnexpectedParents, "unexpected indirect block parents")
+	}
+	return nil
+}
+
func (v *blockValidator) checkDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
	header externalapi.BlockHeader) error {
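checkIndirectParents relies on externalapi.ParentsEqual to compare the header's declared parents with the rebuilt ones. Because BuildParents collects each level out of a map, the per-level comparison has to be order-insensitive; a sketch of the semantics we'd expect, using the Contains method seen in BuildParents (the helper names are ours, not the repo's):

package parentsequalsketch

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// parentsEqual sketches the expected semantics: the same number of levels,
// with each level holding the same set of hashes regardless of order.
func parentsEqual(a, b []externalapi.BlockLevelParents) bool {
	if len(a) != len(b) {
		return false
	}
	for level := range a {
		if !levelEqual(a[level], b[level]) {
			return false
		}
	}
	return true
}

func levelEqual(a, b externalapi.BlockLevelParents) bool {
	if len(a) != len(b) {
		return false
	}
	for _, hash := range a {
		if !b.Contains(hash) {
			return false
		}
	}
	return true
}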

View File

@@ -35,6 +35,7 @@ type blockValidator struct {
	pruningStore        model.PruningStore
	reachabilityManager model.ReachabilityManager
	finalityManager     model.FinalityManager
+	blockParentBuilder  model.BlockParentBuilder
	pruningManager      model.PruningManager

	blockStore model.BlockStore
@@ -69,6 +70,7 @@ func New(powMax *big.Int,
	mergeDepthManager model.MergeDepthManager,
	reachabilityManager model.ReachabilityManager,
	finalityManager model.FinalityManager,
+	blockParentBuilder model.BlockParentBuilder,
	pruningManager model.PruningManager,

	pruningStore model.PruningStore,
@@ -104,6 +106,7 @@ func New(powMax *big.Int,
		mergeDepthManager:   mergeDepthManager,
		reachabilityManager: reachabilityManager,
		finalityManager:     finalityManager,
+		blockParentBuilder:  blockParentBuilder,
		pruningManager:      pruningManager,

		pruningStore: pruningStore,

View File

@@ -75,7 +75,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
		if !exists {
			if !isBlockWithTrustedData {
-				return errors.Errorf("only block with prefilled information can have some missing parents")
+				return errors.Errorf("direct parent %s is missing: only block with prefilled information can have some missing parents", currentParent)
			}

			continue
		}

View File

@@ -132,37 +132,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id:      "I",
-				expectedWindow: []string{"F", "C", "D", "H", "B", "G"},
+				expectedWindow: []string{"F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"I"},
				id:      "J",
-				expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"},
+				expectedWindow: []string{"I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"J"},
				id:      "K",
-				expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"},
+				expectedWindow: []string{"J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"K"},
				id:      "L",
-				expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"},
+				expectedWindow: []string{"K", "J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"L"},
				id:      "M",
-				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"},
+				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "H", "D", "B", "G"},
			},
			{
				parents: []string{"M"},
				id:      "N",
-				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"},
+				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "H", "D", "B"},
			},
			{
				parents: []string{"N"},
				id:      "O",
-				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
+				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "H", "D"},
			},
		},
		dagconfig.DevnetParams.Name: {
@@ -184,12 +184,12 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"C", "D"},
				id:      "E",
-				expectedWindow: []string{"D", "C", "B"},
+				expectedWindow: []string{"C", "D", "B"},
			},
			{
				parents: []string{"C", "D"},
				id:      "F",
-				expectedWindow: []string{"D", "C", "B"},
+				expectedWindow: []string{"C", "D", "B"},
			},
			{
				parents: []string{"A"},
@@ -204,37 +204,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id:      "I",
-				expectedWindow: []string{"F", "H", "D", "C", "B", "G"},
+				expectedWindow: []string{"F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"I"},
				id:      "J",
-				expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"},
+				expectedWindow: []string{"I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"J"},
				id:      "K",
-				expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"},
+				expectedWindow: []string{"J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"K"},
				id:      "L",
-				expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"},
+				expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"L"},
				id:      "M",
-				expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"},
+				expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "G", "B"},
			},
			{
				parents: []string{"M"},
				id:      "N",
-				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"},
+				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "G"},
			},
			{
				parents: []string{"N"},
				id:      "O",
-				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"},
+				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
			},
		},
		dagconfig.SimnetParams.Name: {
@@ -276,37 +276,37 @@ func TestBlockWindow(t *testing.T) {
			{
				parents: []string{"H", "F"},
				id:      "I",
-				expectedWindow: []string{"F", "D", "H", "C", "G", "B"},
+				expectedWindow: []string{"F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"I"},
				id:      "J",
-				expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
+				expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"J"},
				id:      "K",
-				expectedWindow: []string{"J", "I", "F", "D", "H", "C", "G", "B"},
+				expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"K"},
				id:      "L",
-				expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "G", "B"},
+				expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"L"},
				id:      "M",
-				expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "G", "B"},
+				expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"},
			},
			{
				parents: []string{"M"},
				id:      "N",
-				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "G"},
+				expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"},
			},
			{
				parents: []string{"N"},
				id:      "O",
-				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
+				expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
			},
		},
	}

View File

@@ -37,8 +37,8 @@ func TestPruning(t *testing.T) {
		},
		"dag-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "502",
-			dagconfig.TestnetParams.Name: "502",
-			dagconfig.DevnetParams.Name:  "503",
+			dagconfig.TestnetParams.Name: "503",
+			dagconfig.DevnetParams.Name:  "502",
			dagconfig.SimnetParams.Name:  "502",
		},
	}

View File

@@ -200,6 +200,8 @@ var (
	//ErrPruningPointViolation indicates that the pruning point isn't in the block past.
	ErrPruningPointViolation = newRuleError("ErrPruningPointViolation")

+	ErrUnexpectedParents = newRuleError("ErrUnexpectedParents")
+
	ErrUnexpectedPruningPoint = newRuleError("ErrUnexpectedPruningPoint")
	ErrInvalidPruningPointsChain = newRuleError("ErrInvalidPruningPointsChain")

View File

@@ -56,11 +56,16 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
	return bh.parents
}

-func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
-	if len(bh.parents) == 0 {
+func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
+	if len(bh.parents) <= level {
		return externalapi.BlockLevelParents{}
	}
-	return bh.parents[0]
+	return bh.parents[level]
+}
+
+func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
+	return bh.ParentsAtLevel(0)
}
func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
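ParentsAtLevel returns an empty BlockLevelParents for levels the header doesn't carry instead of panicking, so callers can walk levels without bounds checks. A small usage sketch (`countKnownLevels` is a hypothetical helper, not part of the commit):

package headersketch

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// countKnownLevels returns how many consecutive levels, starting from the
// direct parents at level 0, have at least one parent in the given header.
func countKnownLevels(header externalapi.BlockHeader) int {
	level := 0
	for len(header.ParentsAtLevel(level)) > 0 {
		level++
	}
	return level
}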

View File

@@ -15,7 +15,7 @@ import (
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool {
	// The block pow must be less than the claimed target
-	powNum := calcPowValue(header)
+	powNum := CalculateProofOfWorkValue(header)

	// The block hash must be less than or equal to the claimed target.
	return powNum.Cmp(target) <= 0
@@ -27,7 +27,8 @@ func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
	return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits()))
}

-func calcPowValue(header externalapi.MutableBlockHeader) *big.Int {
+// CalculateProofOfWorkValue hashes the given header and returns its big.Int value
+func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int {
	// Zero out the time and nonce.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
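With the helper exported, the proof-of-work check itself reduces to a single comparison. A restatement of what CheckProofOfWorkWithTarget does above (`powPasses` is our name for the sketch):

package powsketch

import (
	"math/big"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
)

// powPasses reports whether a header's proof-of-work value is at or below
// the claimed target, which is exactly the rule CheckProofOfWorkWithTarget enforces.
func powPasses(header externalapi.MutableBlockHeader, target *big.Int) bool {
	return pow.CalculateProofOfWorkValue(header).Cmp(target) <= 0
}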

View File

@@ -3,6 +3,7 @@ package staging
import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/infrastructure/logger"
+	"sync/atomic"
)

// CommitAllChanges creates a transaction in `databaseContext`, and commits all changes in `stagingArea` through it.
@@ -22,3 +23,10 @@ func CommitAllChanges(databaseContext model.DBManager, stagingArea *model.Stagin
	return dbTx.Commit()
}
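For context, a sketch of the typical life cycle around CommitAllChanges, with `databaseContext` assumed to come from the consensus setup and the per-store staging calls elided:

package stagingsketch

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/util/staging"
)

// stageAndCommit shows the usual flow: create a StagingArea, let the stores
// stage their changes into it, then commit everything in one database transaction.
func stageAndCommit(databaseContext model.DBManager) error {
	stagingArea := model.NewStagingArea()
	// ... individual stores stage changes into stagingArea here ...
	return staging.CommitAllChanges(databaseContext, stagingArea)
}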
+var lastShardingID uint64
+
+// GenerateShardingID generates a unique staging sharding ID.
+func GenerateShardingID() model.StagingShardID {
+	return model.StagingShardID(atomic.AddUint64(&lastShardingID, 1))
+}
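Since the counter is advanced with atomic.AddUint64, stores constructed concurrently still receive distinct IDs. A quick test-style check of that property (a hypothetical test, not part of the commit):

package staging_test

import (
	"sync"
	"testing"

	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/util/staging"
)

// TestGenerateShardingIDUniqueness spawns many goroutines and verifies
// that no two of them receive the same shard ID.
func TestGenerateShardingIDUniqueness(t *testing.T) {
	const goroutines = 64
	ids := make(chan model.StagingShardID, goroutines)
	var wg sync.WaitGroup
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ids <- staging.GenerateShardingID()
		}()
	}
	wg.Wait()
	close(ids)

	seen := make(map[model.StagingShardID]bool)
	for id := range ids {
		if seen[id] {
			t.Fatalf("duplicate shard ID %d", id)
		}
		seen[id] = true
	}
}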