mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-05-23 07:16:47 +00:00

* Add TestValidateAndInsertPruningPointWithSideBlocks * Optimize infrastructure bucket paths * Update infrastructure tests * Refactor the consensus/database layer * Remove utils/dbkeys * Use consensus/database in consensus instead of infrastructure * Fix a bug in dbBucketToDatabaseBucket and MakeBucket combination Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com> Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
98 lines
3.1 KiB
Go
// Package ghostdagdatastore implements a store for per-block GHOSTDAG data,
// backed by the consensus database and fronted by an in-memory staging area
// and an LRU cache.
package ghostdagdatastore

import (
	"github.com/golang/protobuf/proto"
	"github.com/kaspanet/kaspad/domain/consensus/database"
	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
// bucket is the database bucket under which all GHOSTDAG data entries are keyed.
var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {
	// staging holds entries written via Stage that have not yet been
	// committed to the database.
	staging map[externalapi.DomainHash]*model.BlockGHOSTDAGData
	// cache memoizes committed entries to avoid repeated database reads.
	cache *lrucache.LRUCache
}
// New instantiates a new GHOSTDAGDataStore
|
|
func New(cacheSize int) model.GHOSTDAGDataStore {
|
|
return &ghostdagDataStore{
|
|
staging: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
|
|
cache: lrucache.New(cacheSize),
|
|
}
|
|
}
|
|
|
|
// Stage stages the given blockGHOSTDAGData for the given blockHash
|
|
func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
|
|
gds.staging[*blockHash] = blockGHOSTDAGData
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) IsStaged() bool {
|
|
return len(gds.staging) != 0
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) Discard() {
|
|
gds.staging = make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData)
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
|
|
for hash, blockGHOSTDAGData := range gds.staging {
|
|
blockGhostdagDataBytes, err := gds.serializeBlockGHOSTDAGData(blockGHOSTDAGData)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
err = dbTx.Put(gds.hashAsKey(&hash), blockGhostdagDataBytes)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
gds.cache.Add(&hash, blockGHOSTDAGData)
|
|
}
|
|
|
|
gds.Discard()
|
|
return nil
|
|
}
|
|
|
|
// Get gets the blockGHOSTDAGData associated with the given blockHash
|
|
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
|
|
if blockGHOSTDAGData, ok := gds.staging[*blockHash]; ok {
|
|
return blockGHOSTDAGData, nil
|
|
}
|
|
|
|
if blockGHOSTDAGData, ok := gds.cache.Get(blockHash); ok {
|
|
return blockGHOSTDAGData.(*model.BlockGHOSTDAGData), nil
|
|
}
|
|
|
|
blockGHOSTDAGDataBytes, err := dbContext.Get(gds.hashAsKey(blockHash))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
blockGHOSTDAGData, err := gds.deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
gds.cache.Add(blockHash, blockGHOSTDAGData)
|
|
return blockGHOSTDAGData, nil
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
|
return bucket.Key(hash.ByteSlice())
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData *model.BlockGHOSTDAGData) ([]byte, error) {
|
|
return proto.Marshal(serialization.BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData))
|
|
}
|
|
|
|
func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes []byte) (*model.BlockGHOSTDAGData, error) {
|
|
dbBlockGHOSTDAGData := &serialization.DbBlockGhostdagData{}
|
|
err := proto.Unmarshal(blockGHOSTDAGDataBytes, dbBlockGHOSTDAGData)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return serialization.DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData)
|
|
}
|