Add database prefix (#1750)

* Add prefix to stores

* Add prefix to forgotten stores

* Add a special type for prefix

* Rename transaction->dbTx

* Change error message

* Use countKeyName

* Rename Temporary Consensus to Staging

* Add DeleteStagingConsensus to Domain interface

* Add lock to staging consensus

* Make prefix type-safer

* Use ioutil.TempDir instead of t.TempDir
This commit is contained in:
Ori Newman 2021-06-15 17:47:17 +03:00 committed by GitHub
parent 70399dae2a
commit 4207c82f5a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 727 additions and 174 deletions

View File

@ -130,6 +130,22 @@ type fakeRelayInvsContext struct {
rwLock sync.RWMutex
}
func (f *fakeRelayInvsContext) DeleteStagingConsensus() error {
panic("implement me")
}
func (f *fakeRelayInvsContext) StagingConsensus() externalapi.Consensus {
panic("implement me")
}
func (f *fakeRelayInvsContext) InitStagingConsensus() error {
panic("implement me")
}
func (f *fakeRelayInvsContext) CommitStagingConsensus() error {
panic("implement me")
}
func (f *fakeRelayInvsContext) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) {
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
}

View File

@ -23,6 +23,22 @@ type fakeDomain struct {
testapi.TestConsensus
}
func (d fakeDomain) DeleteStagingConsensus() error {
panic("implement me")
}
func (d fakeDomain) StagingConsensus() externalapi.Consensus {
panic("implement me")
}
func (d fakeDomain) InitStagingConsensus() error {
panic("implement me")
}
func (d fakeDomain) CommitStagingConsensus() error {
panic("implement me")
}
func (d fakeDomain) Consensus() externalapi.Consensus { return d }
func (d fakeDomain) MiningManager() miningmanager.MiningManager { return nil }

View File

@ -6,20 +6,23 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"google.golang.org/protobuf/proto"
)
var bucket = database.MakeBucket([]byte("acceptance-data"))
var bucketName = []byte("acceptance-data")
// acceptanceDataStore represents a store of AcceptanceData
type acceptanceDataStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new AcceptanceDataStore
func New(cacheSize int, preallocate bool) model.AcceptanceDataStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.AcceptanceDataStore {
return &acceptanceDataStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -84,5 +87,5 @@ func (ads *acceptanceDataStore) deserializeAcceptanceData(acceptanceDataBytes []
}
func (ads *acceptanceDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return ads.bucket.Key(hash.ByteSlice())
}

View File

@ -56,7 +56,7 @@ func (bhss *blockHeaderStagingShard) commitCount(dbTx model.DBTransaction) error
if err != nil {
return err
}
err = dbTx.Put(countKey, countBytes)
err = dbTx.Put(bhss.store.countKey, countBytes)
if err != nil {
return err
}

View File

@ -7,21 +7,26 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("block-headers"))
var countKey = database.MakeBucket(nil).Key([]byte("block-headers-count"))
var bucketName = []byte("block-headers")
var countKeyName = []byte("block-headers-count")
// blockHeaderStore represents a store of blocks
type blockHeaderStore struct {
cache *lrucache.LRUCache
countCached uint64
bucket model.DBBucket
countKey model.DBKey
}
// New instantiates a new BlockHeaderStore
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
blockHeaderStore := &blockHeaderStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
countKey: database.MakeBucket(prefix.Serialize()).Key(countKeyName),
}
err := blockHeaderStore.initializeCount(dbContext)
@ -34,12 +39,12 @@ func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.Block
func (bhs *blockHeaderStore) initializeCount(dbContext model.DBReader) error {
count := uint64(0)
hasCountBytes, err := dbContext.Has(countKey)
hasCountBytes, err := dbContext.Has(bhs.countKey)
if err != nil {
return err
}
if hasCountBytes {
countBytes, err := dbContext.Get(countKey)
countBytes, err := dbContext.Get(bhs.countKey)
if err != nil {
return err
}
@ -144,7 +149,7 @@ func (bhs *blockHeaderStore) Delete(stagingArea *model.StagingArea, blockHash *e
}
func (bhs *blockHeaderStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return bhs.bucket.Key(hash.ByteSlice())
}
func (bhs *blockHeaderStore) serializeHeader(header externalapi.BlockHeader) ([]byte, error) {

View File

@ -7,19 +7,22 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("block-relations"))
var bucketName = []byte("block-relations")
// blockRelationStore represents a store of BlockRelations
type blockRelationStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new BlockRelationStore
func New(cacheSize int, preallocate bool) model.BlockRelationStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockRelationStore {
return &blockRelationStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -72,7 +75,7 @@ func (brs *blockRelationStore) Has(dbContext model.DBReader, stagingArea *model.
}
func (brs *blockRelationStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return brs.bucket.Key(hash.ByteSlice())
}
func (brs *blockRelationStore) serializeBlockRelations(blockRelations *model.BlockRelations) ([]byte, error) {

View File

@ -7,19 +7,22 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("block-statuses"))
var bucketName = []byte("block-statuses")
// blockStatusStore represents a store of BlockStatuses
type blockStatusStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new BlockStatusStore
func New(cacheSize int, preallocate bool) model.BlockStatusStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockStatusStore {
return &blockStatusStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -93,5 +96,5 @@ func (bss *blockStatusStore) deserializeBlockStatus(statusBytes []byte) (externa
}
func (bss *blockStatusStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return bss.bucket.Key(hash.ByteSlice())
}

View File

@ -56,7 +56,7 @@ func (bss *blockStagingShard) commitCount(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(countKey, countBytes)
err = dbTx.Put(bss.store.countKey, countBytes)
if err != nil {
return err
}

View File

@ -7,22 +7,26 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"github.com/pkg/errors"
)
var bucket = database.MakeBucket([]byte("blocks"))
var countKey = database.MakeBucket(nil).Key([]byte("blocks-count"))
var bucketName = []byte("blocks")
// blockStore represents a store of blocks
type blockStore struct {
cache *lrucache.LRUCache
countCached uint64
bucket model.DBBucket
countKey model.DBKey
}
// New instantiates a new BlockStore
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockStore, error) {
func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockStore, error) {
blockStore := &blockStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
countKey: database.MakeBucket(prefix.Serialize()).Key([]byte("blocks-count")),
}
err := blockStore.initializeCount(dbContext)
@ -35,12 +39,12 @@ func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.Block
func (bs *blockStore) initializeCount(dbContext model.DBReader) error {
count := uint64(0)
hasCountBytes, err := dbContext.Has(countKey)
hasCountBytes, err := dbContext.Has(bs.countKey)
if err != nil {
return err
}
if hasCountBytes {
countBytes, err := dbContext.Get(countKey)
countBytes, err := dbContext.Get(bs.countKey)
if err != nil {
return err
}
@ -153,7 +157,7 @@ func (bs *blockStore) deserializeBlock(blockBytes []byte) (*externalapi.DomainBl
}
func (bs *blockStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return bs.bucket.Key(hash.ByteSlice())
}
func (bs *blockStore) Count(stagingArea *model.StagingArea) uint64 {
@ -225,7 +229,7 @@ func (a allBlockHashesIterator) Close() error {
}
func (bs *blockStore) AllBlockHashesIterator(dbContext model.DBReader) (model.BlockIterator, error) {
cursor, err := dbContext.Cursor(bucket)
cursor, err := dbContext.Cursor(bs.bucket)
if err != nil {
return nil, err
}

View File

@ -1,22 +1,31 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxolrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var importingPruningPointUTXOSetKeyName = []byte("importing-pruning-point-utxo-set")
// consensusStateStore represents a store for the current consensus state
type consensusStateStore struct {
virtualUTXOSetCache *utxolrucache.LRUCache
tipsCache []*externalapi.DomainHash
tipsKey model.DBKey
utxoSetBucket model.DBBucket
importingPruningPointUTXOSetKey model.DBKey
}
// New instantiates a new ConsensusStateStore
func New(utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
func New(prefix *prefix.Prefix, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
return &consensusStateStore{
virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize, preallocate),
tipsKey: database.MakeBucket(prefix.Serialize()).Key(tipsKeyName),
importingPruningPointUTXOSetKey: database.MakeBucket(prefix.Serialize()).Key(importingPruningPointUTXOSetKeyName),
utxoSetBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoSetBucketName),
}
}

View File

@ -2,13 +2,12 @@ package consensusstatestore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
var tipsKey = database.MakeBucket(nil).Key([]byte("tips"))
var tipsKeyName = []byte("tips")
func (css *consensusStateStore) Tips(stagingArea *model.StagingArea, dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
stagingShard := css.stagingShard(stagingArea)
@ -21,7 +20,7 @@ func (css *consensusStateStore) Tips(stagingArea *model.StagingArea, dbContext m
return externalapi.CloneHashes(css.tipsCache), nil
}
tipsBytes, err := dbContext.Get(tipsKey)
tipsBytes, err := dbContext.Get(css.tipsKey)
if err != nil {
return nil, err
}
@ -66,7 +65,7 @@ func (csss *consensusStateStagingShard) commitTips(dbTx model.DBTransaction) err
if err != nil {
return err
}
err = dbTx.Put(tipsKey, tipsBytes)
err = dbTx.Put(csss.store.tipsKey, tipsBytes)
if err != nil {
return err
}

View File

@ -1,22 +1,21 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/pkg/errors"
)
var utxoSetBucket = database.MakeBucket([]byte("virtual-utxo-set"))
var utxoSetBucketName = []byte("virtual-utxo-set")
func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
func (css *consensusStateStore) utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
serializedOutpoint, err := serializeOutpoint(outpoint)
if err != nil {
return nil, err
}
return utxoSetBucket.Key(serializedOutpoint), nil
return css.utxoSetBucket.Key(serializedOutpoint), nil
}
func (css *consensusStateStore) StageVirtualUTXODiff(stagingArea *model.StagingArea, virtualUTXODiff externalapi.UTXODiff) {
@ -48,7 +47,7 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
csss.store.virtualUTXOSetCache.Remove(toRemoveOutpoint)
dbKey, err := utxoKey(toRemoveOutpoint)
dbKey, err := csss.store.utxoKey(toRemoveOutpoint)
if err != nil {
return err
}
@ -68,7 +67,7 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
csss.store.virtualUTXOSetCache.Add(toAddOutpoint, toAddEntry)
dbKey, err := utxoKey(toAddOutpoint)
dbKey, err := csss.store.utxoKey(toAddOutpoint)
if err != nil {
return err
}
@ -111,7 +110,7 @@ func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContex
return entry, nil
}
key, err := utxoKey(outpoint)
key, err := css.utxoKey(outpoint)
if err != nil {
return nil, err
}
@ -150,7 +149,7 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
}
}
key, err := utxoKey(outpoint)
key, err := css.utxoKey(outpoint)
if err != nil {
return false, err
}
@ -161,7 +160,7 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader, fromOutpoint *externalapi.DomainOutpoint, limit int) (
[]*externalapi.OutpointAndUTXOEntryPair, error) {
cursor, err := dbContext.Cursor(utxoSetBucket)
cursor, err := dbContext.Cursor(css.utxoSetBucket)
if err != nil {
return nil, err
}
@ -172,7 +171,7 @@ func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader, fromOutpo
if err != nil {
return nil, err
}
seekKey := utxoSetBucket.Key(serializedFromOutpoint)
seekKey := css.utxoSetBucket.Key(serializedFromOutpoint)
err = cursor.Seek(seekKey)
if err != nil {
return nil, err
@ -201,7 +200,7 @@ func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader,
stagingShard := css.stagingShard(stagingArea)
cursor, err := dbContext.Cursor(utxoSetBucket)
cursor, err := dbContext.Cursor(css.utxoSetBucket)
if err != nil {
return nil, err
}

View File

@ -1,24 +1,21 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
var importingPruningPointUTXOSetKey = database.MakeBucket(nil).Key([]byte("importing-pruning-point-utxo-set"))
func (css *consensusStateStore) StartImportingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Put(importingPruningPointUTXOSetKey, []byte{0})
return dbContext.Put(css.importingPruningPointUTXOSetKey, []byte{0})
}
func (css *consensusStateStore) HadStartedImportingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
return dbContext.Has(importingPruningPointUTXOSetKey)
return dbContext.Has(css.importingPruningPointUTXOSetKey)
}
func (css *consensusStateStore) FinishImportingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Delete(importingPruningPointUTXOSetKey)
return dbContext.Delete(css.importingPruningPointUTXOSetKey)
}
func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbContext model.DBWriter,
@ -37,7 +34,7 @@ func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbCo
css.virtualUTXOSetCache.Clear()
// Delete all the old UTXOs from the database
deleteCursor, err := dbContext.Cursor(utxoSetBucket)
deleteCursor, err := dbContext.Cursor(css.utxoSetBucket)
if err != nil {
return err
}
@ -60,7 +57,7 @@ func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbCo
return err
}
key, err := utxoKey(outpoint)
key, err := css.utxoKey(outpoint)
if err != nil {
return err
}

View File

@ -6,22 +6,27 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var daaScoreBucket = database.MakeBucket([]byte("daa-score"))
var daaAddedBlocksBucket = database.MakeBucket([]byte("daa-added-blocks"))
var daaScoreBucketName = []byte("daa-score")
var daaAddedBlocksBucketName = []byte("daa-added-blocks")
// daaBlocksStore represents a store of DAABlocksStore
type daaBlocksStore struct {
daaScoreLRUCache *lrucache.LRUCache
daaAddedBlocksLRUCache *lrucache.LRUCache
daaScoreBucket model.DBBucket
daaAddedBlocksBucket model.DBBucket
}
// New instantiates a new DAABlocksStore
func New(daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
func New(prefix *prefix.Prefix, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
return &daaBlocksStore{
daaScoreLRUCache: lrucache.New(daaScoreCacheSize, preallocate),
daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate),
daaScoreBucket: database.MakeBucket(prefix.Serialize()).Bucket(daaScoreBucketName),
daaAddedBlocksBucket: database.MakeBucket(prefix.Serialize()).Bucket(daaAddedBlocksBucketName),
}
}
@ -90,11 +95,11 @@ func (daas *daaBlocksStore) DAAAddedBlocks(dbContext model.DBReader, stagingArea
}
func (daas *daaBlocksStore) daaScoreHashAsKey(hash *externalapi.DomainHash) model.DBKey {
return daaScoreBucket.Key(hash.ByteSlice())
return daas.daaScoreBucket.Key(hash.ByteSlice())
}
func (daas *daaBlocksStore) daaAddedBlocksHashAsKey(hash *externalapi.DomainHash) model.DBKey {
return daaAddedBlocksBucket.Key(hash.ByteSlice())
return daas.daaAddedBlocksBucket.Key(hash.ByteSlice())
}
func (daas *daaBlocksStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {

View File

@ -5,18 +5,21 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("finality-points"))
var bucketName = []byte("finality-points")
type finalityStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new FinalityStore
func New(cacheSize int, preallocate bool) model.FinalityStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.FinalityStore {
return &finalityStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -55,5 +58,5 @@ func (fs *finalityStore) IsStaged(stagingArea *model.StagingArea) bool {
}
func (fs *finalityStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return fs.bucket.Key(hash.ByteSlice())
}

View File

@ -7,19 +7,22 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
var bucketName = []byte("block-ghostdag-data")
// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new GHOSTDAGDataStore
func New(cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
return &ghostdagDataStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -60,7 +63,7 @@ func (gds *ghostdagDataStore) Get(dbContext model.DBReader, stagingArea *model.S
}
func (gds *ghostdagDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return gds.bucket.Key(hash.ByteSlice())
}
func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData *model.BlockGHOSTDAGData) ([]byte, error) {

View File

@ -69,7 +69,7 @@ func (hscss *headersSelectedChainStagingShard) Commit(dbTx model.DBTransaction)
}
}
err := dbTx.Put(highestChainBlockIndexKey, hscss.store.serializeIndex(highestIndex))
err := dbTx.Put(hscss.store.highestChainBlockIndexKey, hscss.store.serializeIndex(highestIndex))
if err != nil {
return err
}

View File

@ -2,6 +2,7 @@ package headersselectedchainstore
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
@ -12,21 +13,27 @@ import (
"github.com/pkg/errors"
)
var bucketChainBlockHashByIndex = database.MakeBucket([]byte("chain-block-hash-by-index"))
var bucketChainBlockIndexByHash = database.MakeBucket([]byte("chain-block-index-by-hash"))
var highestChainBlockIndexKey = database.MakeBucket(nil).Key([]byte("highest-chain-block-index"))
var bucketChainBlockHashByIndexName = []byte("chain-block-hash-by-index")
var bucketChainBlockIndexByHashName = []byte("chain-block-index-by-hash")
var highestChainBlockIndexKeyName = []byte("highest-chain-block-index")
type headersSelectedChainStore struct {
cacheByIndex *lrucacheuint64tohash.LRUCache
cacheByHash *lrucache.LRUCache
cacheHighestChainBlockIndex uint64
bucketChainBlockHashByIndex model.DBBucket
bucketChainBlockIndexByHash model.DBBucket
highestChainBlockIndexKey model.DBKey
}
// New instantiates a new HeadersSelectedChainStore
func New(cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
return &headersSelectedChainStore{
cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate),
cacheByHash: lrucache.New(cacheSize, preallocate),
bucketChainBlockHashByIndex: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockHashByIndexName),
bucketChainBlockIndexByHash: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockIndexByHashName),
highestChainBlockIndexKey: database.MakeBucket(prefix.Serialize()).Key(highestChainBlockIndexKeyName),
}
}
@ -138,13 +145,13 @@ func (hscs *headersSelectedChainStore) deserializeIndex(indexBytes []byte) (uint
}
func (hscs *headersSelectedChainStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucketChainBlockIndexByHash.Key(hash.ByteSlice())
return hscs.bucketChainBlockIndexByHash.Key(hash.ByteSlice())
}
func (hscs *headersSelectedChainStore) indexAsKey(index uint64) model.DBKey {
var keyBytes [8]byte
binary.BigEndian.PutUint64(keyBytes[:], index)
return bucketChainBlockHashByIndex.Key(keyBytes[:])
return hscs.bucketChainBlockHashByIndex.Key(keyBytes[:])
}
func (hscs *headersSelectedChainStore) highestChainBlockIndex(dbContext model.DBReader) (uint64, bool, error) {
@ -152,7 +159,7 @@ func (hscs *headersSelectedChainStore) highestChainBlockIndex(dbContext model.DB
return hscs.cacheHighestChainBlockIndex, true, nil
}
indexBytes, err := dbContext.Get(highestChainBlockIndexKey)
indexBytes, err := dbContext.Get(hscs.highestChainBlockIndexKey)
if err != nil {
if errors.Is(err, database.ErrNotFound) {
return 0, false, nil

View File

@ -28,7 +28,7 @@ func (hstss *headersSelectedTipStagingShard) Commit(dbTx model.DBTransaction) er
if err != nil {
return err
}
err = dbTx.Put(headerSelectedTipKey, selectedTipBytes)
err = dbTx.Put(hstss.store.key, selectedTipBytes)
if err != nil {
return err
}

View File

@ -6,17 +6,21 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
var keyName = []byte("headers-selected-tip")
type headerSelectedTipStore struct {
cache *externalapi.DomainHash
key model.DBKey
}
// New instantiates a new HeaderSelectedTipStore
func New() model.HeaderSelectedTipStore {
return &headerSelectedTipStore{}
func New(prefix *prefix.Prefix) model.HeaderSelectedTipStore {
return &headerSelectedTipStore{
key: database.MakeBucket(prefix.Serialize()).Key(keyName),
}
}
func (hsts *headerSelectedTipStore) Has(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
@ -30,7 +34,7 @@ func (hsts *headerSelectedTipStore) Has(dbContext model.DBReader, stagingArea *m
return true, nil
}
return dbContext.Has(headerSelectedTipKey)
return dbContext.Has(hsts.key)
}
func (hsts *headerSelectedTipStore) Stage(stagingArea *model.StagingArea, selectedTip *externalapi.DomainHash) {
@ -55,7 +59,7 @@ func (hsts *headerSelectedTipStore) HeadersSelectedTip(dbContext model.DBReader,
return hsts.cache, nil
}
selectedTipBytes, err := dbContext.Get(headerSelectedTipKey)
selectedTipBytes, err := dbContext.Get(hsts.key)
if err != nil {
return nil, err
}

View File

@ -7,19 +7,22 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var bucket = database.MakeBucket([]byte("multisets"))
var bucketName = []byte("multisets")
// multisetStore represents a store of Multisets
type multisetStore struct {
cache *lrucache.LRUCache
bucket model.DBBucket
}
// New instantiates a new MultisetStore
func New(cacheSize int, preallocate bool) model.MultisetStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.MultisetStore {
return &multisetStore{
cache: lrucache.New(cacheSize, preallocate),
bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName),
}
}
@ -71,7 +74,7 @@ func (ms *multisetStore) Delete(stagingArea *model.StagingArea, blockHash *exter
}
func (ms *multisetStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash.ByteSlice())
return ms.bucket.Key(hash.ByteSlice())
}
func (ms *multisetStore) serializeMultiset(multiset model.Multiset) ([]byte, error) {

View File

@ -2,18 +2,17 @@ package pruningstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
var importedPruningPointUTXOsBucket = database.MakeBucket([]byte("imported-pruning-point-utxos"))
var importedPruningPointMultiset = database.MakeBucket(nil).Key([]byte("imported-pruning-point-multiset"))
var importedPruningPointUTXOsBucketName = []byte("imported-pruning-point-utxos")
var importedPruningPointMultisetKeyName = []byte("imported-pruning-point-multiset")
func (ps *pruningStore) ClearImportedPruningPointUTXOs(dbContext model.DBWriter) error {
cursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
cursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket)
if err != nil {
return err
}
@ -54,7 +53,7 @@ func (ps *pruningStore) AppendImportedPruningPointUTXOs(dbTx model.DBTransaction
}
func (ps *pruningStore) ImportedPruningPointUTXOIterator(dbContext model.DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) {
cursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
cursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket)
if err != nil {
return nil, err
}
@ -130,7 +129,7 @@ func (ps *pruningStore) importedPruningPointUTXOKey(outpoint *externalapi.Domain
return nil, err
}
return importedPruningPointUTXOsBucket.Key(serializedOutpoint), nil
return ps.importedPruningPointUTXOsBucket.Key(serializedOutpoint), nil
}
func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) {
@ -161,11 +160,11 @@ func deserializeUTXOEntry(entryBytes []byte) (externalapi.UTXOEntry, error) {
}
func (ps *pruningStore) ClearImportedPruningPointMultiset(dbContext model.DBWriter) error {
return dbContext.Delete(importedPruningPointMultiset)
return dbContext.Delete(ps.importedPruningPointMultisetKey)
}
func (ps *pruningStore) ImportedPruningPointMultiset(dbContext model.DBReader) (model.Multiset, error) {
multisetBytes, err := dbContext.Get(importedPruningPointMultiset)
multisetBytes, err := dbContext.Get(ps.importedPruningPointMultisetKey)
if err != nil {
return nil, err
}
@ -177,7 +176,7 @@ func (ps *pruningStore) UpdateImportedPruningPointMultiset(dbTx model.DBTransact
if err != nil {
return err
}
return dbTx.Put(importedPruningPointMultiset, multisetBytes)
return dbTx.Put(ps.importedPruningPointMultisetKey, multisetBytes)
}
func (ps *pruningStore) serializeMultiset(multiset model.Multiset) ([]byte, error) {
@ -196,7 +195,7 @@ func (ps *pruningStore) deserializeMultiset(multisetBytes []byte) (model.Multise
func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWriter) error {
// Delete all the old UTXOs from the database
deleteCursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
deleteCursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket)
if err != nil {
return err
}
@ -213,7 +212,7 @@ func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWrit
}
// Insert all the new UTXOs into the database
insertCursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
insertCursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket)
if err != nil {
return err
}
@ -223,7 +222,7 @@ func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWrit
if err != nil {
return err
}
pruningPointUTXOSetKey := pruningPointUTXOSetBucket.Key(importedPruningPointUTXOSetKey.Suffix())
pruningPointUTXOSetKey := ps.pruningPointUTXOSetBucket.Key(importedPruningPointUTXOSetKey.Suffix())
serializedUTXOEntry, err := insertCursor.Value()
if err != nil {

View File

@ -32,7 +32,7 @@ func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(pruningBlockHashKey, pruningPointBytes)
err = dbTx.Put(mss.store.pruningBlockHashKey, pruningPointBytes)
if err != nil {
return err
}
@ -44,7 +44,7 @@ func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(previousPruningBlockHashKey, oldPruningPointBytes)
err = dbTx.Put(mss.store.previousPruningBlockHashKey, oldPruningPointBytes)
if err != nil {
return err
}
@ -56,7 +56,7 @@ func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(candidatePruningPointHashKey, candidateBytes)
err = dbTx.Put(mss.store.candidatePruningPointHashKey, candidateBytes)
if err != nil {
return err
}
@ -64,7 +64,7 @@ func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error {
}
if mss.startUpdatingPruningPointUTXOSet {
err := dbTx.Put(updatingPruningPointUTXOSetKey, []byte{0})
err := dbTx.Put(mss.store.updatingPruningPointUTXOSetKey, []byte{0})
if err != nil {
return err
}

View File

@ -6,24 +6,41 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var pruningBlockHashKey = database.MakeBucket(nil).Key([]byte("pruning-block-hash"))
var previousPruningBlockHashKey = database.MakeBucket(nil).Key([]byte("previous-pruning-block-hash"))
var candidatePruningPointHashKey = database.MakeBucket(nil).Key([]byte("candidate-pruning-point-hash"))
var pruningPointUTXOSetBucket = database.MakeBucket([]byte("pruning-point-utxo-set"))
var updatingPruningPointUTXOSetKey = database.MakeBucket(nil).Key([]byte("updating-pruning-point-utxo-set"))
// Raw key/bucket names used by pruningStore. The actual database keys are
// built in New by nesting these names under the consensus' database prefix.
var (
	pruningBlockHashKeyName            = []byte("pruning-block-hash")
	previousPruningBlockHashKeyName    = []byte("previous-pruning-block-hash")
	candidatePruningPointHashKeyName   = []byte("candidate-pruning-point-hash")
	pruningPointUTXOSetBucketName      = []byte("pruning-point-utxo-set")
	updatingPruningPointUTXOSetKeyName = []byte("updating-pruning-point-utxo-set")
)
// pruningStore represents a store for the current pruning state
type pruningStore struct {
pruningPointCache *externalapi.DomainHash
oldPruningPointCache *externalapi.DomainHash
pruningPointCandidateCache *externalapi.DomainHash
pruningBlockHashKey model.DBKey
previousPruningBlockHashKey model.DBKey
candidatePruningPointHashKey model.DBKey
pruningPointUTXOSetBucket model.DBBucket
updatingPruningPointUTXOSetKey model.DBKey
importedPruningPointUTXOsBucket model.DBBucket
importedPruningPointMultisetKey model.DBKey
}
// New instantiates a new PruningStore
func New() model.PruningStore {
return &pruningStore{}
func New(prefix *prefix.Prefix) model.PruningStore {
return &pruningStore{
pruningBlockHashKey: database.MakeBucket(prefix.Serialize()).Key(pruningBlockHashKeyName),
previousPruningBlockHashKey: database.MakeBucket(prefix.Serialize()).Key(previousPruningBlockHashKeyName),
candidatePruningPointHashKey: database.MakeBucket(prefix.Serialize()).Key(candidatePruningPointHashKeyName),
pruningPointUTXOSetBucket: database.MakeBucket(prefix.Serialize()).Bucket(pruningPointUTXOSetBucketName),
importedPruningPointUTXOsBucket: database.MakeBucket(prefix.Serialize()).Bucket(importedPruningPointUTXOsBucketName),
updatingPruningPointUTXOSetKey: database.MakeBucket(prefix.Serialize()).Key(updatingPruningPointUTXOSetKeyName),
importedPruningPointMultisetKey: database.MakeBucket(prefix.Serialize()).Key(importedPruningPointMultisetKeyName),
}
}
func (ps *pruningStore) StagePruningPointCandidate(stagingArea *model.StagingArea, candidate *externalapi.DomainHash) {
@ -43,7 +60,7 @@ func (ps *pruningStore) PruningPointCandidate(dbContext model.DBReader, stagingA
return ps.pruningPointCandidateCache, nil
}
candidateBytes, err := dbContext.Get(pruningBlockHashKey)
candidateBytes, err := dbContext.Get(ps.pruningBlockHashKey)
if err != nil {
return nil, err
}
@ -67,7 +84,7 @@ func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader, stagi
return true, nil
}
return dbContext.Has(candidatePruningPointHashKey)
return dbContext.Has(ps.candidatePruningPointHashKey)
}
// Stage stages the pruning state
@ -98,7 +115,7 @@ func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter, diff
if err != nil {
return err
}
err = dbContext.Delete(pruningPointUTXOSetBucket.Key(serializedOutpoint))
err = dbContext.Delete(ps.pruningPointUTXOSetBucket.Key(serializedOutpoint))
if err != nil {
return err
}
@ -119,7 +136,7 @@ func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter, diff
if err != nil {
return err
}
err = dbContext.Put(pruningPointUTXOSetBucket.Key(serializedOutpoint), serializedUTXOEntry)
err = dbContext.Put(ps.pruningPointUTXOSetBucket.Key(serializedOutpoint), serializedUTXOEntry)
if err != nil {
return err
}
@ -139,7 +156,7 @@ func (ps *pruningStore) PruningPoint(dbContext model.DBReader, stagingArea *mode
return ps.pruningPointCache, nil
}
pruningPointBytes, err := dbContext.Get(pruningBlockHashKey)
pruningPointBytes, err := dbContext.Get(ps.pruningBlockHashKey)
if err != nil {
return nil, err
}
@ -163,7 +180,7 @@ func (ps *pruningStore) PreviousPruningPoint(dbContext model.DBReader, stagingAr
return ps.oldPruningPointCache, nil
}
oldPruningPointBytes, err := dbContext.Get(previousPruningBlockHashKey)
oldPruningPointBytes, err := dbContext.Get(ps.previousPruningBlockHashKey)
if err != nil {
return nil, err
}
@ -201,11 +218,11 @@ func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader, stagingArea *m
return true, nil
}
return dbContext.Has(pruningBlockHashKey)
return dbContext.Has(ps.pruningBlockHashKey)
}
func (ps *pruningStore) PruningPointUTXOIterator(dbContext model.DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) {
cursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
cursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket)
if err != nil {
return nil, err
}
@ -215,7 +232,7 @@ func (ps *pruningStore) PruningPointUTXOIterator(dbContext model.DBReader) (exte
func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
cursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
cursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket)
if err != nil {
return nil, err
}
@ -226,7 +243,7 @@ func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
if err != nil {
return nil, err
}
seekKey := pruningPointUTXOSetBucket.Key(serializedFromOutpoint)
seekKey := ps.pruningPointUTXOSetBucket.Key(serializedFromOutpoint)
err = cursor.Seek(seekKey)
if err != nil {
return nil, err
@ -257,9 +274,9 @@ func (ps *pruningStore) StageStartUpdatingPruningPointUTXOSet(stagingArea *model
}
func (ps *pruningStore) HadStartedUpdatingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
return dbContext.Has(updatingPruningPointUTXOSetKey)
return dbContext.Has(ps.updatingPruningPointUTXOSetKey)
}
func (ps *pruningStore) FinishUpdatingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Delete(updatingPruningPointUTXOSetKey)
return dbContext.Delete(ps.updatingPruningPointUTXOSetKey)
}

View File

@ -27,7 +27,7 @@ func (rdss *reachabilityDataStagingShard) Commit(dbTx model.DBTransaction) error
if err != nil {
return err
}
err = dbTx.Put(reachabilityReindexRootKey, reachabilityReindexRootBytes)
err = dbTx.Put(rdss.store.reachabilityReindexRootKey, reachabilityReindexRootBytes)
if err != nil {
return err
}

View File

@ -7,21 +7,27 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
)
var reachabilityDataBucket = database.MakeBucket([]byte("reachability-data"))
var reachabilityReindexRootKey = database.MakeBucket(nil).Key([]byte("reachability-reindex-root"))
// Raw bucket/key names for reachabilityDataStore; New nests them under the
// consensus' database prefix.
var (
	reachabilityDataBucketName     = []byte("reachability-data")
	reachabilityReindexRootKeyName = []byte("reachability-reindex-root")
)
// reachabilityDataStore represents a store of ReachabilityData
type reachabilityDataStore struct {
reachabilityDataCache *lrucache.LRUCache
reachabilityReindexRootCache *externalapi.DomainHash
reachabilityDataBucket model.DBBucket
reachabilityReindexRootKey model.DBKey
}
// New instantiates a new ReachabilityDataStore
func New(cacheSize int, preallocate bool) model.ReachabilityDataStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.ReachabilityDataStore {
return &reachabilityDataStore{
reachabilityDataCache: lrucache.New(cacheSize, preallocate),
reachabilityDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(reachabilityDataBucketName),
reachabilityReindexRootKey: database.MakeBucket(prefix.Serialize()).Key(reachabilityReindexRootKeyName),
}
}
@ -94,7 +100,7 @@ func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBRead
return rds.reachabilityReindexRootCache, nil
}
reachabilityReindexRootBytes, err := dbContext.Get(reachabilityReindexRootKey)
reachabilityReindexRootBytes, err := dbContext.Get(rds.reachabilityReindexRootKey)
if err != nil {
return nil, err
}
@ -108,7 +114,7 @@ func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBRead
}
func (rds *reachabilityDataStore) reachabilityDataBlockHashAsKey(hash *externalapi.DomainHash) model.DBKey {
return reachabilityDataBucket.Key(hash.ByteSlice())
return rds.reachabilityDataBucket.Key(hash.ByteSlice())
}
func (rds *reachabilityDataStore) serializeReachabilityData(reachabilityData model.ReachabilityData) ([]byte, error) {

View File

@ -7,23 +7,28 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"github.com/pkg/errors"
)
var utxoDiffBucket = database.MakeBucket([]byte("utxo-diffs"))
var utxoDiffChildBucket = database.MakeBucket([]byte("utxo-diff-children"))
// Raw bucket names for utxoDiffStore; New nests them under the consensus'
// database prefix.
var (
	utxoDiffBucketName      = []byte("utxo-diffs")
	utxoDiffChildBucketName = []byte("utxo-diff-children")
)
// utxoDiffStore represents a store of UTXODiffs
type utxoDiffStore struct {
utxoDiffCache *lrucache.LRUCache
utxoDiffChildCache *lrucache.LRUCache
utxoDiffBucket model.DBBucket
utxoDiffChildBucket model.DBBucket
}
// New instantiates a new UTXODiffStore
func New(cacheSize int, preallocate bool) model.UTXODiffStore {
func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.UTXODiffStore {
return &utxoDiffStore{
utxoDiffCache: lrucache.New(cacheSize, preallocate),
utxoDiffChildCache: lrucache.New(cacheSize, preallocate),
utxoDiffBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffBucketName),
utxoDiffChildBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffChildBucketName),
}
}
@ -134,11 +139,11 @@ func (uds *utxoDiffStore) Delete(stagingArea *model.StagingArea, blockHash *exte
}
func (uds *utxoDiffStore) utxoDiffHashAsKey(hash *externalapi.DomainHash) model.DBKey {
return utxoDiffBucket.Key(hash.ByteSlice())
return uds.utxoDiffBucket.Key(hash.ByteSlice())
}
func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) model.DBKey {
return utxoDiffChildBucket.Key(hash.ByteSlice())
return uds.utxoDiffChildBucket.Key(hash.ByteSlice())
}
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff externalapi.UTXODiff) ([]byte, error) {

View File

@ -2,6 +2,7 @@ package consensus
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"github.com/pkg/errors"
"io/ioutil"
"os"
@ -64,7 +65,7 @@ type Config struct {
// Factory instantiates new Consensuses
type Factory interface {
NewConsensus(config *Config, db infrastructuredatabase.Database) (
NewConsensus(config *Config, db infrastructuredatabase.Database, dbPrefix *prefix.Prefix) (
externalapi.Consensus, error)
NewTestConsensus(config *Config, testName string) (
tc testapi.TestConsensus, teardown func(keepDataDir bool), err error)
@ -96,7 +97,7 @@ func NewFactory() Factory {
}
// NewConsensus instantiates a new Consensus
func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Database) (
func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Database, dbPrefix *prefix.Prefix) (
externalapi.Consensus, error) {
dbManager := consensusdatabase.New(db)
@ -115,23 +116,23 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
pruningWindowSizePlusFinalityDepthForCache := int(config.PruningDepth() + config.FinalityDepth())
// Data Structures
acceptanceDataStore := acceptancedatastore.New(200, preallocateCaches)
blockStore, err := blockstore.New(dbManager, 200, preallocateCaches)
acceptanceDataStore := acceptancedatastore.New(dbPrefix, 200, preallocateCaches)
blockStore, err := blockstore.New(dbManager, dbPrefix, 200, preallocateCaches)
if err != nil {
return nil, err
}
blockHeaderStore, err := blockheaderstore.New(dbManager, 10_000, preallocateCaches)
blockHeaderStore, err := blockheaderstore.New(dbManager, dbPrefix, 10_000, preallocateCaches)
if err != nil {
return nil, err
}
blockRelationStore := blockrelationstore.New(pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
blockRelationStore := blockrelationstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
blockStatusStore := blockstatusstore.New(pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
multisetStore := multisetstore.New(200, preallocateCaches)
pruningStore := pruningstore.New()
reachabilityDataStore := reachabilitydatastore.New(pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
utxoDiffStore := utxodiffstore.New(200, preallocateCaches)
consensusStateStore := consensusstatestore.New(10_000, preallocateCaches)
blockStatusStore := blockstatusstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
multisetStore := multisetstore.New(dbPrefix, 200, preallocateCaches)
pruningStore := pruningstore.New(dbPrefix)
reachabilityDataStore := reachabilitydatastore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
utxoDiffStore := utxodiffstore.New(dbPrefix, 200, preallocateCaches)
consensusStateStore := consensusstatestore.New(dbPrefix, 10_000, preallocateCaches)
// Some tests artificially decrease the pruningWindowSize, thus making the GhostDagStore cache too small for a
// a single DifficultyAdjustmentWindow. To alleviate this problem we make sure that the cache size is at least
@ -140,12 +141,12 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize {
ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize
}
ghostdagDataStore := ghostdagdatastore.New(ghostdagDataCacheSize, preallocateCaches)
ghostdagDataStore := ghostdagdatastore.New(dbPrefix, ghostdagDataCacheSize, preallocateCaches)
headersSelectedTipStore := headersselectedtipstore.New()
finalityStore := finalitystore.New(200, preallocateCaches)
headersSelectedChainStore := headersselectedchainstore.New(pruningWindowSizeForCaches, preallocateCaches)
daaBlocksStore := daablocksstore.New(pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
headersSelectedTipStore := headersselectedtipstore.New(dbPrefix)
finalityStore := finalitystore.New(dbPrefix, 200, preallocateCaches)
headersSelectedChainStore := headersselectedchainstore.New(dbPrefix, pruningWindowSizeForCaches, preallocateCaches)
daaBlocksStore := daablocksstore.New(dbPrefix, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches)
// Processes
reachabilityManager := reachabilitymanager.New(
@ -469,7 +470,9 @@ func (f *factory) NewTestConsensus(config *Config, testName string) (
if err != nil {
return nil, nil, err
}
consensusAsInterface, err := f.NewConsensus(config, db)
testConsensusDBPrefix := &prefix.Prefix{}
consensusAsInterface, err := f.NewConsensus(config, db, testConsensusDBPrefix)
if err != nil {
return nil, nil, err
}

View File

@ -1,6 +1,7 @@
package consensus
import (
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"io/ioutil"
"testing"
@ -23,7 +24,7 @@ func TestNewConsensus(t *testing.T) {
t.Fatalf("error in NewLevelDB: %s", err)
}
_, err = f.NewConsensus(config, db)
_, err = f.NewConsensus(config, db, &prefix.Prefix{})
if err != nil {
t.Fatalf("error in NewConsensus: %+v", err)
}

View File

@ -4,32 +4,180 @@ import (
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/miningmanager"
"github.com/kaspanet/kaspad/domain/prefixmanager"
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"sync"
"sync/atomic"
"unsafe"
)
// Domain provides a reference to the domain's external APIs
type Domain interface {
	// MiningManager returns the domain's mining manager.
	MiningManager() miningmanager.MiningManager
	// Consensus returns the currently active consensus.
	Consensus() externalapi.Consensus
	// StagingConsensus returns the staging consensus; only meaningful after a
	// successful InitStagingConsensus.
	StagingConsensus() externalapi.Consensus
	// InitStagingConsensus creates a fresh consensus under the inactive
	// database prefix and exposes it via StagingConsensus.
	InitStagingConsensus() error
	// CommitStagingConsensus promotes the staging consensus to be the active
	// consensus and deletes the data of the previously active one.
	CommitStagingConsensus() error
	// DeleteStagingConsensus discards the staging consensus and deletes its
	// database data.
	DeleteStagingConsensus() error
}
type domain struct {
miningManager miningmanager.MiningManager
consensus externalapi.Consensus
consensus *externalapi.Consensus
stagingConsensus *externalapi.Consensus
stagingConsensusLock sync.RWMutex
consensusConfig *consensus.Config
db infrastructuredatabase.Database
}
func (d domain) Consensus() externalapi.Consensus {
return d.consensus
// Consensus returns the currently active consensus.
// NOTE(review): CommitStagingConsensus replaces d.consensus via
// atomic.StorePointer, but this read is a plain (non-atomic) dereference —
// confirm this mixed access is safe on all target platforms.
func (d *domain) Consensus() externalapi.Consensus {
	return *d.consensus
}
func (d domain) MiningManager() miningmanager.MiningManager {
// StagingConsensus returns the current staging consensus.
// NOTE(review): d.stagingConsensus is nil unless InitStagingConsensus
// succeeded and CommitStagingConsensus/DeleteStagingConsensus have not run
// since — this dereference panics otherwise; callers must guarantee ordering.
func (d *domain) StagingConsensus() externalapi.Consensus {
	d.stagingConsensusLock.RLock()
	defer d.stagingConsensusLock.RUnlock()
	return *d.stagingConsensus
}
// MiningManager returns the domain's mining manager.
func (d *domain) MiningManager() miningmanager.MiningManager {
	return d.miningManager
}
// InitStagingConsensus creates a new, empty consensus under the currently
// unused database prefix and exposes it via StagingConsensus. It fails if a
// staging consensus already exists or if there is no active consensus yet.
func (d *domain) InitStagingConsensus() error {
	d.stagingConsensusLock.Lock()
	defer d.stagingConsensusLock.Unlock()

	_, hasInactivePrefix, err := prefixmanager.InactivePrefix(d.db)
	if err != nil {
		return err
	}

	// Only one staging consensus may exist at a time — an inactive prefix
	// marker means one is already registered.
	if hasInactivePrefix {
		return errors.Errorf("cannot create staging consensus when a staging consensus already exists")
	}

	activePrefix, exists, err := prefixmanager.ActivePrefix(d.db)
	if err != nil {
		return err
	}

	if !exists {
		return errors.Errorf("cannot create a staging consensus when there's " +
			"no active consensus")
	}

	// The staging consensus lives under the opposite prefix of the active one.
	// It is registered as inactive before any consensus data is written, so a
	// crash leaves it eligible for cleanup by DeleteInactivePrefix (see New).
	inactivePrefix := activePrefix.Flip()
	err = prefixmanager.SetPrefixAsInactive(d.db, inactivePrefix)
	if err != nil {
		return err
	}

	consensusFactory := consensus.NewFactory()
	consensusInstance, err := consensusFactory.NewConsensus(d.consensusConfig, d.db, inactivePrefix)
	if err != nil {
		return err
	}

	d.stagingConsensus = &consensusInstance
	return nil
}
// CommitStagingConsensus promotes the staging consensus to be the active one:
// inside a database transaction it swaps the active/inactive prefix markers,
// then deletes all data under the now-inactive (previously active) prefix, and
// finally publishes the staging consensus pointer to readers.
func (d *domain) CommitStagingConsensus() error {
	d.stagingConsensusLock.Lock()
	defer d.stagingConsensusLock.Unlock()

	dbTx, err := d.db.Begin()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	// NOTE(review): the inactive prefix is read through d.db while the active
	// prefix below is read through dbTx — confirm this mixed
	// transactional/non-transactional access is intentional.
	inactivePrefix, hasInactivePrefix, err := prefixmanager.InactivePrefix(d.db)
	if err != nil {
		return err
	}

	if !hasInactivePrefix {
		return errors.Errorf("there's no inactive prefix to commit")
	}

	activePrefix, exists, err := prefixmanager.ActivePrefix(dbTx)
	if err != nil {
		return err
	}

	if !exists {
		return errors.Errorf("cannot commit a staging consensus when there's " +
			"no active consensus")
	}

	// Swap the two prefix markers; both writes become visible together when
	// the transaction commits below.
	err = prefixmanager.SetPrefixAsActive(dbTx, inactivePrefix)
	if err != nil {
		return err
	}

	err = prefixmanager.SetPrefixAsInactive(dbTx, activePrefix)
	if err != nil {
		return err
	}

	err = dbTx.Commit()
	if err != nil {
		return err
	}

	// We delete anything associated with the old prefix outside
	// of the transaction in order to save memory.
	err = prefixmanager.DeleteInactivePrefix(d.db)
	if err != nil {
		return err
	}

	// Publish the staging consensus as the active one with an atomic pointer
	// store. NOTE(review): Consensus() reads d.consensus with a plain
	// dereference rather than an atomic load — confirm this is safe.
	tempConsensusPointer := unsafe.Pointer(d.stagingConsensus)
	consensusPointer := (*unsafe.Pointer)(unsafe.Pointer(&d.consensus))
	atomic.StorePointer(consensusPointer, tempConsensusPointer)
	d.stagingConsensus = nil
	return nil
}
// DeleteStagingConsensus discards the staging consensus and erases all of its
// data (everything stored under the inactive database prefix).
func (d *domain) DeleteStagingConsensus() error {
	d.stagingConsensusLock.Lock()
	defer d.stagingConsensusLock.Unlock()

	if err := prefixmanager.DeleteInactivePrefix(d.db); err != nil {
		return err
	}

	d.stagingConsensus = nil
	return nil
}
// New instantiates a new instance of a Domain object
func New(consensusConfig *consensus.Config, db infrastructuredatabase.Database) (Domain, error) {
err := prefixmanager.DeleteInactivePrefix(db)
if err != nil {
return nil, err
}
activePrefix, exists, err := prefixmanager.ActivePrefix(db)
if err != nil {
return nil, err
}
if !exists {
activePrefix = &prefix.Prefix{}
err = prefixmanager.SetPrefixAsActive(db, activePrefix)
if err != nil {
return nil, err
}
}
consensusFactory := consensus.NewFactory()
consensusInstance, err := consensusFactory.NewConsensus(consensusConfig, db)
consensusInstance, err := consensusFactory.NewConsensus(consensusConfig, db, activePrefix)
if err != nil {
return nil, err
}
@ -38,7 +186,9 @@ func New(consensusConfig *consensus.Config, db infrastructuredatabase.Database)
miningManager := miningManagerFactory.NewMiningManager(consensusInstance, &consensusConfig.Params)
return &domain{
consensus: consensusInstance,
consensus: &consensusInstance,
miningManager: miningManager,
consensusConfig: consensusConfig,
db: db,
}, nil
}

132
domain/domain_test.go Normal file
View File

@ -0,0 +1,132 @@
package domain_test
import (
"fmt"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
"io/ioutil"
"os"
"strings"
"testing"
)
// TestCreateStagingConsensus exercises the staging-consensus lifecycle:
// creation, isolation from the active consensus, commit, and cleanup of a
// leftover staging consensus on domain restart.
func TestCreateStagingConsensus(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		dataDir, err := ioutil.TempDir("", fmt.Sprintf("TestCreateStagingConsensus-%s", consensusConfig.Name))
		if err != nil {
			t.Fatalf("ioutil.TempDir: %+v", err)
		}
		defer os.RemoveAll(dataDir)

		db, err := ldb.NewLevelDB(dataDir, 8)
		if err != nil {
			t.Fatalf("NewLevelDB: %+v", err)
		}

		domainInstance, err := domain.New(consensusConfig, db)
		if err != nil {
			t.Fatalf("New: %+v", err)
		}

		err = domainInstance.InitStagingConsensus()
		if err != nil {
			t.Fatalf("InitStagingConsensus: %+v", err)
		}

		// A second init must fail while a staging consensus exists. Guard
		// against a nil error before calling err.Error() so an unexpected
		// success fails the test instead of panicking it.
		err = domainInstance.InitStagingConsensus()
		if err == nil || !strings.Contains(err.Error(), "cannot create staging consensus when a staging consensus already exists") {
			t.Fatalf("unexpected error: %+v", err)
		}

		coinbaseData := &externalapi.DomainCoinbaseData{
			ScriptPublicKey: &externalapi.ScriptPublicKey{},
			ExtraData:       []byte{},
		}

		block, err := domainInstance.StagingConsensus().BuildBlock(coinbaseData, nil)
		if err != nil {
			t.Fatalf("BuildBlock: %+v", err)
		}

		_, err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block)
		if err != nil {
			t.Fatalf("ValidateAndInsertBlock: %+v", err)
		}

		// The block must exist on the staging consensus but not leak into the
		// active consensus.
		blockHash := consensushashing.BlockHash(block)
		blockInfo, err := domainInstance.StagingConsensus().GetBlockInfo(blockHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
		}

		if !blockInfo.Exists {
			t.Fatalf("block not found on staging consensus")
		}

		blockInfo, err = domainInstance.Consensus().GetBlockInfo(blockHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
		}

		if blockInfo.Exists {
			t.Fatalf("a block from staging consensus was found on consensus")
		}

		// After commit, the staged block becomes visible on the active consensus.
		err = domainInstance.CommitStagingConsensus()
		if err != nil {
			t.Fatalf("CommitStagingConsensus: %+v", err)
		}

		blockInfo, err = domainInstance.Consensus().GetBlockInfo(blockHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
		}

		if !blockInfo.Exists {
			t.Fatalf("a block from staging consensus was not found on consensus after commit")
		}

		// Now we create a new staging consensus and check that it's deleted once we init a new domain. We also
		// validate that the main consensus persisted the data from the committed temp consensus.
		err = domainInstance.InitStagingConsensus()
		if err != nil {
			t.Fatalf("InitStagingConsensus: %+v", err)
		}

		_, err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block)
		if err != nil {
			t.Fatalf("ValidateAndInsertBlock: %+v", err)
		}

		domainInstance2, err := domain.New(consensusConfig, db)
		if err != nil {
			t.Fatalf("New: %+v", err)
		}

		blockInfo, err = domainInstance2.Consensus().GetBlockInfo(blockHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
		}

		if !blockInfo.Exists {
			t.Fatalf("a block from committed staging consensus was not persisted to the active consensus")
		}

		err = domainInstance2.InitStagingConsensus()
		if err != nil {
			t.Fatalf("InitStagingConsensus: %+v", err)
		}

		blockInfo, err = domainInstance2.StagingConsensus().GetBlockInfo(blockHash)
		if err != nil {
			t.Fatalf("GetBlockInfo: %+v", err)
		}

		if blockInfo.Exists {
			t.Fatalf("block from previous temp consensus shouldn't be found on a fresh temp consensus")
		}
	})
}

View File

@ -0,0 +1,9 @@
package prefixmanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
// log is the prefixmanager package's logger, registered under the "PRFX"
// subsystem name.
var log = logger.RegisterSubSystem("PRFX")

// spawn wraps goroutine launches so that panics are reported through log
// (presumably — confirm against panics.GoroutineWrapperFunc).
var spawn = panics.GoroutineWrapperFunc(log)

View File

@ -0,0 +1,106 @@
package prefixmanager
import (
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix"
"github.com/kaspanet/kaspad/infrastructure/db/database"
)
// Unprefixed bookkeeping keys that record which database prefix is currently
// active and which one (if any) holds a staging consensus.
var (
	activePrefixKey   = database.MakeBucket(nil).Key([]byte("active-prefix"))
	inactivePrefixKey = database.MakeBucket(nil).Key([]byte("inactive-prefix"))
)
// ActivePrefix returns the current active database prefix, and whether it
// exists. A missing key is not an error: it returns (nil, false, nil).
func ActivePrefix(dataAccessor database.DataAccessor) (*prefix.Prefix, bool, error) {
	prefixBytes, err := dataAccessor.Get(activePrefixKey)
	if database.IsNotFoundError(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}

	// Named activePrefix (not prefix) to avoid shadowing the imported
	// prefix package.
	activePrefix, err := prefix.Deserialize(prefixBytes)
	if err != nil {
		return nil, false, err
	}

	return activePrefix, true, nil
}
// InactivePrefix returns the current inactive database prefix, and whether it
// exists. A missing key is not an error: it returns (nil, false, nil).
func InactivePrefix(dataAccessor database.DataAccessor) (*prefix.Prefix, bool, error) {
	prefixBytes, err := dataAccessor.Get(inactivePrefixKey)
	if database.IsNotFoundError(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}

	// Named inactivePrefix (not prefix) to avoid shadowing the imported
	// prefix package.
	inactivePrefix, err := prefix.Deserialize(prefixBytes)
	if err != nil {
		return nil, false, err
	}

	return inactivePrefix, true, nil
}
// DeleteInactivePrefix deletes all data associated with the inactive database
// prefix, including the inactive-prefix marker itself. It is a no-op when no
// inactive prefix is registered.
func DeleteInactivePrefix(dataAccessor database.DataAccessor) error {
	prefixBytes, err := dataAccessor.Get(inactivePrefixKey)
	if database.IsNotFoundError(err) {
		// Nothing to delete.
		return nil
	}
	if err != nil {
		return err
	}

	// Named inactivePrefix (not prefix) to avoid shadowing the imported
	// prefix package.
	inactivePrefix, err := prefix.Deserialize(prefixBytes)
	if err != nil {
		return err
	}

	err = deletePrefix(dataAccessor, inactivePrefix)
	if err != nil {
		return err
	}

	// The marker is removed only after the prefixed data was deleted, so a
	// failure mid-delete leaves the prefix discoverable for a retry.
	return dataAccessor.Delete(inactivePrefixKey)
}
// deletePrefix deletes every key stored under the given database prefix by
// walking a cursor over the prefix's bucket.
func deletePrefix(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error {
	log.Infof("Deleting database prefix %x", prefix)

	cursor, err := dataAccessor.Cursor(database.MakeBucket(prefix.Serialize()))
	if err != nil {
		return err
	}
	defer cursor.Close()

	for found := cursor.First(); found; found = cursor.Next() {
		key, err := cursor.Key()
		if err != nil {
			return err
		}

		if err := dataAccessor.Delete(key); err != nil {
			return err
		}
	}

	return nil
}
// SetPrefixAsActive sets the given prefix as the active prefix by storing its
// serialized form under activePrefixKey.
func SetPrefixAsActive(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error {
	return dataAccessor.Put(activePrefixKey, prefix.Serialize())
}
// SetPrefixAsInactive sets the given prefix as the inactive prefix by storing
// its serialized form under inactivePrefixKey.
func SetPrefixAsInactive(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error {
	return dataAccessor.Put(inactivePrefixKey, prefix.Serialize())
}

View File

@ -0,0 +1,46 @@
package prefix
import "github.com/pkg/errors"
// The only two legal prefix values; Flip alternates between them.
const (
	prefixZero byte = 0
	prefixOne  byte = 1
)

// Prefix is a database prefix that is used to manage more than one database at once.
type Prefix struct {
	value byte
}

// Serialize serializes the prefix into a one-byte slice.
func (p *Prefix) Serialize() []byte {
	return []byte{p.value}
}

// Equal returns whether p equals to other
func (p *Prefix) Equal(other *Prefix) bool {
	return p.value == other.value
}

// Flip returns the opposite of the current prefix
func (p *Prefix) Flip() *Prefix {
	value := prefixZero
	if p.value == prefixZero {
		value = prefixOne
	}

	return &Prefix{value: value}
}

// Deserialize deserializes a prefix from a byte slice
func Deserialize(prefixBytes []byte) (*Prefix, error) {
	// A serialized prefix is exactly one byte. Rejecting every other length
	// (the original check only rejected lengths greater than one) also guards
	// the index below against a panic on empty input.
	if len(prefixBytes) != 1 {
		return nil, errors.Errorf("invalid length %d for prefix", len(prefixBytes))
	}

	if prefixBytes[0] != prefixZero && prefixBytes[0] != prefixOne {
		return nil, errors.Errorf("invalid prefix %x", prefixBytes)
	}

	return &Prefix{value: prefixBytes[0]}, nil
}