Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-09-13 21:10:12 +00:00

* Add StagingArea struct
* Implemented staging areas in blockStore
* Move blockStagingShard to separate folder
* Apply staging shard to acceptanceDataStore
* Update blockHeaderStore with StagingArea
* Add StagingArea to BlockRelationStore
* Add StagingArea to blockStatusStore
* Add StagingArea to consensusStateStore
* Add StagingArea to daaBlocksStore
* Add StagingArea to finalityStore
* Add StagingArea to ghostdagDataStore
* Add StagingArea to headersSelectedChainStore and headersSelectedTipStore
* Add StagingArea to multisetStore
* Add StagingArea to pruningStore
* Add StagingArea to reachabilityDataStore
* Add StagingArea to utxoDiffStore
* Fix forgotten compilation error
* Update reachability manager and some more things with StagingArea
* Add StagingArea to dagTopologyManager, and some more
* Add StagingArea to GHOSTDAGManager, and some more
* Add StagingArea to difficultyManager, and some more
* Add StagingArea to dagTraversalManager, and some more
* Add StagingArea to headerTipsManager, and some more
* Add StagingArea to consensusStateManager, pastMedianTimeManager
* Add StagingArea to transactionValidator
* Add StagingArea to finalityManager
* Add StagingArea to mergeDepthManager
* Add StagingArea to pruningManager
* Add StagingArea to rest of ValidateAndInsertBlock
* Add StagingArea to blockValidator
* Add StagingArea to coinbaseManager
* Add StagingArea to syncManager
* Add StagingArea to blockBuilder
* Update consensus with StagingArea
* Add StagingArea to ghostdag2
* Fix remaining compilation errors
* Update names of stagingShards
* Fix forgotten stagingArea passing
* Mark stagingShard.isCommited = true once committed
* Move isStaged to stagingShard, so that it's available without going through store
* Make blockHeaderStore count be available from stagingShard
* Fix remaining forgotten stagingArea passing
* commitAllChanges should call dbTx.Commit in the end
* Fix all tests in blockValidator
* Fix all tests in consensusStateManager and some more
* Fix all tests in pruningManager
* Add many missing stagingAreas in tests
* Fix many tests
* Fix most of all other tests
* Fix ghostdag_test.go
* Add comment to StagingArea
* Make list of StagingShards an array
* Add comment to StagingShardID
* Make sure all staging shards are pointer-receiver
* Undo bucket rename in block_store
* Typo: isCommited -> isCommitted
* Add comment explaining why stagingArea.shards is an array
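The commit list above is many variations on one pattern: each store buffers its pending writes in a per-store staging shard that lives inside a shared StagingArea, and everything is flushed to the database in a single transaction at the end ("commitAllChanges should call dbTx.Commit in the end"). Below is a minimal, self-contained sketch of that idea; the StagingArea, StagingShard, toAdd/toDelete, and isStaged names mirror the list and the file that follows, but the simplified types (string values, a map standing in for the database transaction, a slice of shards instead of kaspad's StagingShardID-indexed array) are illustrative assumptions, not kaspad's actual definitions.

package main

import "fmt"

// Hash stands in for externalapi.DomainHash in this simplified sketch.
type Hash string

// StagingShard is a per-store buffer of uncommitted changes: writes go into
// toAdd and deletions into toDelete instead of touching the database directly.
type StagingShard struct {
	toAdd    map[Hash]string
	toDelete map[Hash]struct{}
}

// isStaged reports whether the shard holds any uncommitted data.
func (s *StagingShard) isStaged() bool {
	return len(s.toAdd) != 0 || len(s.toDelete) != 0
}

// commit flushes the shard's buffered changes into a (mock) database transaction.
func (s *StagingShard) commit(dbTx map[Hash]string) {
	for hash, value := range s.toAdd {
		dbTx[hash] = value
	}
	for hash := range s.toDelete {
		delete(dbTx, hash)
	}
}

// StagingArea collects the shards of every store touched by one logical
// operation so they can all be committed together.
type StagingArea struct {
	shards []*StagingShard
}

// NewShard creates a shard, registers it in the staging area, and returns it.
func (sa *StagingArea) NewShard() *StagingShard {
	shard := &StagingShard{
		toAdd:    make(map[Hash]string),
		toDelete: make(map[Hash]struct{}),
	}
	sa.shards = append(sa.shards, shard)
	return shard
}

// CommitAllChanges commits every shard; in kaspad this is also where the
// database transaction itself is committed ("dbTx.Commit in the end").
func (sa *StagingArea) CommitAllChanges(dbTx map[Hash]string) {
	for _, shard := range sa.shards {
		shard.commit(dbTx)
	}
}

func main() {
	db := map[Hash]string{}
	stagingArea := &StagingArea{}

	shard := stagingArea.NewShard()
	shard.toAdd["block-1"] = "multiset-1"
	fmt.Println("staged:", shard.isStaged()) // true

	stagingArea.CommitAllChanges(db)
	fmt.Println("db:", db) // map[block-1:multiset-1]
}

In the real code each store looks up its shard inside the StagingArea by a fixed StagingShardID index (the "Make list of StagingShards an array" and "Add comment to StagingShardID" items), which is what the ms.stagingShard(stagingArea) calls in the file below do.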
90 lines · 2.7 KiB · Go
package multisetstore

import (
	"github.com/golang/protobuf/proto"
	"github.com/kaspanet/kaspad/domain/consensus/database"
	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)

var bucket = database.MakeBucket([]byte("multisets"))

// multisetStore represents a store of Multisets
type multisetStore struct {
	cache *lrucache.LRUCache
}

// New instantiates a new MultisetStore
func New(cacheSize int, preallocate bool) model.MultisetStore {
	return &multisetStore{
		cache: lrucache.New(cacheSize, preallocate),
	}
}

// Stage stages the given multiset for the given blockHash
func (ms *multisetStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, multiset model.Multiset) {
	stagingShard := ms.stagingShard(stagingArea)

	stagingShard.toAdd[*blockHash] = multiset.Clone()
}

// IsStaged returns whether any data for this store is staged (and not yet committed) in the given stagingArea
func (ms *multisetStore) IsStaged(stagingArea *model.StagingArea) bool {
	return ms.stagingShard(stagingArea).isStaged()
}

// Get gets the multiset associated with the given blockHash
func (ms *multisetStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.Multiset, error) {
	stagingShard := ms.stagingShard(stagingArea)

	// Lookup order: the staging shard first, then the LRU cache, and finally the database
	if multiset, ok := stagingShard.toAdd[*blockHash]; ok {
		return multiset.Clone(), nil
	}

	if multiset, ok := ms.cache.Get(blockHash); ok {
		return multiset.(model.Multiset).Clone(), nil
	}

	multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash))
	if err != nil {
		return nil, err
	}

	multiset, err := ms.deserializeMultiset(multisetBytes)
	if err != nil {
		return nil, err
	}
	ms.cache.Add(blockHash, multiset)
	return multiset.Clone(), nil
}

// Delete deletes the multiset associated with the given blockHash
func (ms *multisetStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
	stagingShard := ms.stagingShard(stagingArea)

	// If the multiset is currently staged for addition, just un-stage it;
	// otherwise mark it for deletion on commit
	if _, ok := stagingShard.toAdd[*blockHash]; ok {
		delete(stagingShard.toAdd, *blockHash)
		return
	}
	stagingShard.toDelete[*blockHash] = struct{}{}
}

// hashAsKey converts the given block hash into a database key under the multisets bucket
func (ms *multisetStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return bucket.Key(hash.ByteSlice())
}

// serializeMultiset serializes the given multiset into protobuf bytes
func (ms *multisetStore) serializeMultiset(multiset model.Multiset) ([]byte, error) {
	return proto.Marshal(serialization.MultisetToDBMultiset(multiset))
}

// deserializeMultiset deserializes protobuf bytes back into a model.Multiset
func (ms *multisetStore) deserializeMultiset(multisetBytes []byte) (model.Multiset, error) {
	dbMultiset := &serialization.DbMultiset{}
	err := proto.Unmarshal(multisetBytes, dbMultiset)
	if err != nil {
		return nil, err
	}

	return serialization.DBMultisetToMultiset(dbMultiset)
}
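For context, a rough sketch of how calling code might drive this store through a staging area. The import path for this package and the model.NewStagingArea constructor are assumptions about the surrounding kaspad plumbing (neither appears in this file); New, Stage, and Get are the methods defined above, and committing the staging area as a whole is handled elsewhere by the consensus layer.

package example

import (
	"github.com/kaspanet/kaspad/domain/consensus/datastructures/multisetstore"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// stageAndReadBack stages a multiset for blockHash and immediately reads it
// back through the same staging area, without anything being written to the
// database yet. Assumes model.NewStagingArea() exists outside this file.
func stageAndReadBack(dbContext model.DBReader, blockHash *externalapi.DomainHash,
	multiset model.Multiset) (model.Multiset, error) {

	store := multisetstore.New(200, false) // cache up to 200 multisets, no preallocation
	stagingArea := model.NewStagingArea()  // assumed constructor for a fresh staging area

	// Stage the multiset; it lands in the store's staging shard, not the database.
	store.Stage(stagingArea, blockHash, multiset)

	// Get checks the staging shard first, so the staged value is visible immediately.
	return store.Get(dbContext, stagingArea, blockHash)
}

Nothing reaches the database until the staging area as a whole is committed in one transaction, which is the point of the change described in the commit list at the top of this page.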