[NOD-1492] Implement GHOSTDAGDataStore, MultisetStore, PruningStore, ReachabilityDataStore, and UTXODiffStore (#977)

* [NOD-1492] Implement GHOSTDAGDataStore.

* [NOD-1492] Implement MultisetStore.

* [NOD-1492] Implement PruningStore.

* [NOD-1492] Implement ReachabilityDataStore.

* [NOD-1492] Implement UTXODiffStore.

* [NOD-1492] Pluralize the multiset bucket name.

* [NOD-1492] In PruningPoint and PruningPointSerializedUTXOSet, don't use IsStaged.

* [NOD-1492] Leave pruning point serialization/deserialization for future implementation.

* [NOD-1492] Leave reachability reindex root serialization/deserialization for future implementation.

* [NOD-1492] Leave utxo diff child serialization/deserialization for future implementation.

* [NOD-1492] Add Serialize() to Multiset.

* [NOD-1492] Also check serializedUTXOSetStaging in IsStaged.

* [NOD-1492] Also check utxoDiffChildStaging in IsStaged.

* [NOD-1492] Fix UTXODiffStore.Delete.
This commit is contained in:
stasatdaglabs 2020-10-28 17:13:14 +02:00 committed by GitHub
parent 7402f3fb0e
commit c88266afed
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 321 additions and 41 deletions

View File

@ -3,35 +3,70 @@ package ghostdagdatastore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))

// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {
	// staging holds data that has been staged but not yet committed,
	// keyed by block hash
	staging map[externalapi.DomainHash]*model.BlockGHOSTDAGData
}

// New instantiates a new GHOSTDAGDataStore
func New() model.GHOSTDAGDataStore {
	return &ghostdagDataStore{
		staging: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
	}
}

// Stage stages the given blockGHOSTDAGData for the given blockHash
func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
	gds.staging[*blockHash] = blockGHOSTDAGData
}

// IsStaged returns whether any data is currently staged
func (gds *ghostdagDataStore) IsStaged() bool {
	return len(gds.staging) != 0
}

// Discard drops all staged data without writing it to the database
func (gds *ghostdagDataStore) Discard() {
	gds.staging = make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData)
}

// Commit writes all staged data into dbTx and clears the staging area
func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
	for hash, blockGHOSTDAGData := range gds.staging {
		// &hash is safe here: the key is only read synchronously inside
		// hashAsKey, before the next iteration reassigns the loop variable
		err := dbTx.Put(gds.hashAsKey(&hash), gds.serializeBlockGHOSTDAGData(blockGHOSTDAGData))
		if err != nil {
			return err
		}
	}
	gds.Discard()
	return nil
}

// Get gets the blockGHOSTDAGData associated with the given blockHash,
// preferring staged data over the database
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
	if blockGHOSTDAGData, ok := gds.staging[*blockHash]; ok {
		return blockGHOSTDAGData, nil
	}

	blockGHOSTDAGDataBytes, err := dbContext.Get(gds.hashAsKey(blockHash))
	if err != nil {
		return nil, err
	}
	return gds.deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes)
}

// hashAsKey converts the given block hash into a database key under bucket
func (gds *ghostdagDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return bucket.Key(hash[:])
}

// serializeBlockGHOSTDAGData is left for a future implementation
func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData *model.BlockGHOSTDAGData) []byte {
	panic("implement me")
}

// deserializeBlockGHOSTDAGData is left for a future implementation
func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes []byte) (*model.BlockGHOSTDAGData, error) {
	panic("implement me")
}

View File

@ -3,40 +3,79 @@ package multisetstore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var bucket = dbkeys.MakeBucket([]byte("multisets"))

// multisetStore represents a store of Multisets
type multisetStore struct {
	// staging holds multisets that have been staged but not yet
	// committed, keyed by block hash
	staging map[externalapi.DomainHash]model.Multiset
}

// New instantiates a new MultisetStore
func New() model.MultisetStore {
	return &multisetStore{
		staging: make(map[externalapi.DomainHash]model.Multiset),
	}
}

// Stage stages the given multiset for the given blockHash
func (ms *multisetStore) Stage(blockHash *externalapi.DomainHash, multiset model.Multiset) {
	ms.staging[*blockHash] = multiset
}

// IsStaged returns whether any data is currently staged
func (ms *multisetStore) IsStaged() bool {
	return len(ms.staging) != 0
}

// Discard drops all staged data without writing it to the database
func (ms *multisetStore) Discard() {
	ms.staging = make(map[externalapi.DomainHash]model.Multiset)
}

// Commit writes all staged multisets into dbTx and clears the staging area
func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
	for hash, multiset := range ms.staging {
		// &hash is read synchronously inside hashAsKey, so taking the
		// loop variable's address is safe
		err := dbTx.Put(ms.hashAsKey(&hash), ms.serializeMultiset(multiset))
		if err != nil {
			return err
		}
	}
	ms.Discard()
	return nil
}

// Get gets the multiset associated with the given blockHash, preferring
// staged data over the database
func (ms *multisetStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.Multiset, error) {
	if multiset, ok := ms.staging[*blockHash]; ok {
		return multiset, nil
	}

	multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash))
	if err != nil {
		return nil, err
	}
	return ms.deserializeMultiset(multisetBytes)
}

// Delete deletes the multiset associated with the given blockHash.
// NOTE(review): if the hash is staged, only the staged entry is removed —
// a previously committed database entry for the same hash would survive;
// confirm this is the intended staging semantics.
func (ms *multisetStore) Delete(dbTx model.DBTransaction, blockHash *externalapi.DomainHash) error {
	if _, ok := ms.staging[*blockHash]; ok {
		delete(ms.staging, *blockHash)
		return nil
	}
	return dbTx.Delete(ms.hashAsKey(blockHash))
}

// hashAsKey converts the given block hash into a database key under bucket
func (ms *multisetStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return bucket.Key(hash[:])
}

// serializeMultiset is left for a future implementation
func (ms *multisetStore) serializeMultiset(multiset model.Multiset) []byte {
	panic("implement me")
}

// deserializeMultiset is left for a future implementation
func (ms *multisetStore) deserializeMultiset(multisetBytes []byte) (model.Multiset, error) {
	panic("implement me")
}

View File

@ -3,40 +3,88 @@ package pruningstore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var pruningBlockHashKey = dbkeys.MakeBucket().Key([]byte("pruning-block-hash"))
var pruningSerializedUTXOSetkey = dbkeys.MakeBucket().Key([]byte("pruning-utxo-set"))
// pruningStore represents a store for the current pruning state
type pruningStore struct {
blockHashStaging *externalapi.DomainHash
serializedUTXOSetStaging []byte
}
// New instantiates a new PruningStore
func New() model.PruningStore {
return &pruningStore{}
return &pruningStore{
blockHashStaging: nil,
serializedUTXOSetStaging: nil,
}
}
// Stage stages the pruning state
func (pps *pruningStore) Stage(pruningPointBlockHash *externalapi.DomainHash, pruningPointUTXOSet model.ReadOnlyUTXOSet) {
panic("implement me")
func (ps *pruningStore) Stage(pruningPointBlockHash *externalapi.DomainHash, pruningPointUTXOSet model.ReadOnlyUTXOSet) {
ps.blockHashStaging = pruningPointBlockHash
ps.serializedUTXOSetStaging = ps.serializeUTXOSet(pruningPointUTXOSet)
}
func (pps *pruningStore) IsStaged() bool {
panic("implement me")
func (ps *pruningStore) IsStaged() bool {
return ps.blockHashStaging != nil || ps.serializedUTXOSetStaging != nil
}
func (pps *pruningStore) Discard() {
panic("implement me")
func (ps *pruningStore) Discard() {
ps.blockHashStaging = nil
ps.serializedUTXOSetStaging = nil
}
func (pps *pruningStore) Commit(dbTx model.DBTransaction) error {
panic("implement me")
func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
err := dbTx.Put(pruningBlockHashKey, ps.serializePruningPoint(ps.blockHashStaging))
if err != nil {
return err
}
err = dbTx.Put(pruningSerializedUTXOSetkey, ps.serializedUTXOSetStaging)
if err != nil {
return err
}
ps.Discard()
return nil
}
// PruningPoint gets the current pruning point
func (pps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.DomainHash, error) {
return nil, nil
func (ps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.DomainHash, error) {
if ps.blockHashStaging != nil {
return ps.blockHashStaging, nil
}
blockHashBytes, err := dbContext.Get(pruningBlockHashKey)
if err != nil {
return nil, err
}
blockHash, err := ps.deserializePruningPoint(blockHashBytes)
if err != nil {
return nil, err
}
return blockHash, nil
}
// PruningPointSerializedUTXOSet returns the serialized UTXO set of the current pruning point
func (pps *pruningStore) PruningPointSerializedUTXOSet(dbContext model.DBReader) ([]byte, error) {
return nil, nil
func (ps *pruningStore) PruningPointSerializedUTXOSet(dbContext model.DBReader) ([]byte, error) {
if ps.serializedUTXOSetStaging != nil {
return ps.serializedUTXOSetStaging, nil
}
return dbContext.Get(pruningSerializedUTXOSetkey)
}
func (ps *pruningStore) serializePruningPoint(pruningPoint *externalapi.DomainHash) []byte {
panic("implement me")
}
func (ps *pruningStore) deserializePruningPoint(pruningPointBytes []byte) (*externalapi.DomainHash, error) {
panic("implement me")
}
func (ps *pruningStore) serializeUTXOSet(utxoSet model.ReadOnlyUTXOSet) []byte {
panic("implement me")
}

View File

@ -3,47 +3,112 @@ package reachabilitydatastore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var reachabilityDataBucket = dbkeys.MakeBucket([]byte("reachability-data"))
var reachabilityReindexRootKey = dbkeys.MakeBucket().Key([]byte("reachability-reindex-root"))

// reachabilityDataStore represents a store of ReachabilityData
type reachabilityDataStore struct {
	// reachabilityDataStaging holds staged per-block reachability data;
	// reachabilityReindexRootStaging is nil when no root is staged
	reachabilityDataStaging        map[externalapi.DomainHash]*model.ReachabilityData
	reachabilityReindexRootStaging *externalapi.DomainHash
}

// New instantiates a new ReachabilityDataStore
func New() model.ReachabilityDataStore {
	return &reachabilityDataStore{
		reachabilityDataStaging:        make(map[externalapi.DomainHash]*model.ReachabilityData),
		reachabilityReindexRootStaging: nil,
	}
}

// StageReachabilityData stages the given reachabilityData for the given
// blockHash
func (rds *reachabilityDataStore) StageReachabilityData(blockHash *externalapi.DomainHash, reachabilityData *model.ReachabilityData) {
	rds.reachabilityDataStaging[*blockHash] = reachabilityData
}

// StageReachabilityReindexRoot stages the given reachabilityReindexRoot
func (rds *reachabilityDataStore) StageReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash) {
	rds.reachabilityReindexRootStaging = reachabilityReindexRoot
}

// IsAnythingStaged returns whether any data or reindex root is currently
// staged
func (rds *reachabilityDataStore) IsAnythingStaged() bool {
	return len(rds.reachabilityDataStaging) != 0 || rds.reachabilityReindexRootStaging != nil
}

// Discard drops all staged data without writing it to the database
func (rds *reachabilityDataStore) Discard() {
	rds.reachabilityDataStaging = make(map[externalapi.DomainHash]*model.ReachabilityData)
	rds.reachabilityReindexRootStaging = nil
}

// Commit writes the staged reindex root (if any) and all staged
// reachability data into dbTx, then clears the staging area
func (rds *reachabilityDataStore) Commit(dbTx model.DBTransaction) error {
	if rds.reachabilityReindexRootStaging != nil {
		err := dbTx.Put(reachabilityReindexRootKey, rds.serializeReachabilityReindexRoot(rds.reachabilityReindexRootStaging))
		if err != nil {
			return err
		}
	}
	for hash, reachabilityData := range rds.reachabilityDataStaging {
		// &hash is read synchronously inside the key helper, so taking
		// the loop variable's address is safe
		err := dbTx.Put(rds.reachabilityDataBlockHashAsKey(&hash), rds.serializeReachabilityData(reachabilityData))
		if err != nil {
			return err
		}
	}
	rds.Discard()
	return nil
}

// ReachabilityData returns the reachabilityData associated with the given
// blockHash, preferring staged data over the database
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader,
	blockHash *externalapi.DomainHash) (*model.ReachabilityData, error) {

	if reachabilityData, ok := rds.reachabilityDataStaging[*blockHash]; ok {
		return reachabilityData, nil
	}

	reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
	if err != nil {
		return nil, err
	}
	return rds.deserializeReachabilityData(reachabilityDataBytes)
}

// ReachabilityReindexRoot returns the current reachability reindex root,
// preferring the staged root over the database
func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader) (*externalapi.DomainHash, error) {
	if rds.reachabilityReindexRootStaging != nil {
		return rds.reachabilityReindexRootStaging, nil
	}

	reachabilityReindexRootBytes, err := dbContext.Get(reachabilityReindexRootKey)
	if err != nil {
		return nil, err
	}
	return rds.deserializeReachabilityReindexRoot(reachabilityReindexRootBytes)
}

// reachabilityDataBlockHashAsKey converts the given block hash into a
// database key under reachabilityDataBucket
func (rds *reachabilityDataStore) reachabilityDataBlockHashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return reachabilityDataBucket.Key(hash[:])
}

// serializeReachabilityData is left for a future implementation
func (rds *reachabilityDataStore) serializeReachabilityData(reachabilityData *model.ReachabilityData) []byte {
	panic("implement me")
}

// deserializeReachabilityData is left for a future implementation
func (rds *reachabilityDataStore) deserializeReachabilityData(reachabilityDataBytes []byte) (*model.ReachabilityData, error) {
	panic("implement me")
}

// serializeReachabilityReindexRoot is left for a future implementation
func (rds *reachabilityDataStore) serializeReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash) []byte {
	panic("implement me")
}

// deserializeReachabilityReindexRoot is left for a future implementation
func (rds *reachabilityDataStore) deserializeReachabilityReindexRoot(reachabilityReindexRootBytes []byte) (*externalapi.DomainHash, error) {
	panic("implement me")
}

View File

@ -3,45 +3,137 @@ package utxodiffstore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var utxoDiffBucket = dbkeys.MakeBucket([]byte("utxo-diffs"))
var utxoDiffChildBucket = dbkeys.MakeBucket([]byte("utxo-diff-children"))

// utxoDiffStore represents a store of UTXODiffs
type utxoDiffStore struct {
	// utxoDiffStaging and utxoDiffChildStaging hold staged data keyed by
	// block hash; a hash may appear in either or both maps
	utxoDiffStaging      map[externalapi.DomainHash]*model.UTXODiff
	utxoDiffChildStaging map[externalapi.DomainHash]*externalapi.DomainHash
}

// New instantiates a new UTXODiffStore
func New() model.UTXODiffStore {
	return &utxoDiffStore{
		utxoDiffStaging:      make(map[externalapi.DomainHash]*model.UTXODiff),
		utxoDiffChildStaging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
	}
}

// Stage stages the given utxoDiff and utxoDiffChild for the given blockHash
func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff *model.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
	uds.utxoDiffStaging[*blockHash] = utxoDiff
	uds.utxoDiffChildStaging[*blockHash] = utxoDiffChild
}

// IsStaged returns whether any data is currently staged
func (uds *utxoDiffStore) IsStaged() bool {
	return len(uds.utxoDiffStaging) != 0 || len(uds.utxoDiffChildStaging) != 0
}

// IsBlockHashStaged returns whether the given blockHash has a staged
// utxoDiff or a staged utxoDiffChild
func (uds *utxoDiffStore) IsBlockHashStaged(blockHash *externalapi.DomainHash) bool {
	if _, ok := uds.utxoDiffStaging[*blockHash]; ok {
		return true
	}
	_, ok := uds.utxoDiffChildStaging[*blockHash]
	return ok
}

// Discard drops all staged data without writing it to the database
func (uds *utxoDiffStore) Discard() {
	uds.utxoDiffStaging = make(map[externalapi.DomainHash]*model.UTXODiff)
	uds.utxoDiffChildStaging = make(map[externalapi.DomainHash]*externalapi.DomainHash)
}

// Commit writes all staged utxoDiffs and utxoDiffChildren into dbTx and
// clears the staging area
func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
	for hash, utxoDiff := range uds.utxoDiffStaging {
		err := dbTx.Put(uds.utxoDiffHashAsKey(&hash), uds.serializeUTXODiff(utxoDiff))
		if err != nil {
			return err
		}
	}
	for hash, utxoDiffChild := range uds.utxoDiffChildStaging {
		// BUG FIX: children were written under utxoDiffHashAsKey
		// (utxo-diffs bucket), clobbering the utxoDiff entry for the
		// same hash, while UTXODiffChild reads from
		// utxoDiffChildHashAsKey — committed children were unreadable.
		// Write under the child bucket's key instead.
		err := dbTx.Put(uds.utxoDiffChildHashAsKey(&hash), uds.serializeUTXODiffChild(utxoDiffChild))
		if err != nil {
			return err
		}
	}
	uds.Discard()
	return nil
}

// UTXODiff gets the utxoDiff associated with the given blockHash,
// preferring staged data over the database
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.UTXODiff, error) {
	if utxoDiff, ok := uds.utxoDiffStaging[*blockHash]; ok {
		return utxoDiff, nil
	}

	utxoDiffBytes, err := dbContext.Get(uds.utxoDiffHashAsKey(blockHash))
	if err != nil {
		return nil, err
	}
	return uds.deserializeUTXODiff(utxoDiffBytes)
}

// UTXODiffChild gets the utxoDiff child associated with the given
// blockHash, preferring staged data over the database
func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
	if utxoDiffChild, ok := uds.utxoDiffChildStaging[*blockHash]; ok {
		return utxoDiffChild, nil
	}

	utxoDiffChildBytes, err := dbContext.Get(uds.utxoDiffChildHashAsKey(blockHash))
	if err != nil {
		return nil, err
	}
	return uds.deserializeUTXODiffChild(utxoDiffChildBytes)
}

// Delete deletes the utxoDiff and utxoDiffChild associated with the given
// blockHash. If the hash is staged, only the staged entries are removed.
func (uds *utxoDiffStore) Delete(dbTx model.DBTransaction, blockHash *externalapi.DomainHash) error {
	if uds.IsBlockHashStaged(blockHash) {
		// built-in delete is a no-op on missing keys, so no existence
		// checks are needed
		delete(uds.utxoDiffStaging, *blockHash)
		delete(uds.utxoDiffChildStaging, *blockHash)
		return nil
	}

	err := dbTx.Delete(uds.utxoDiffHashAsKey(blockHash))
	if err != nil {
		return err
	}
	return dbTx.Delete(uds.utxoDiffChildHashAsKey(blockHash))
}

// utxoDiffHashAsKey converts the given block hash into a database key
// under utxoDiffBucket
func (uds *utxoDiffStore) utxoDiffHashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return utxoDiffBucket.Key(hash[:])
}

// utxoDiffChildHashAsKey converts the given block hash into a database key
// under utxoDiffChildBucket
func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) model.DBKey {
	return utxoDiffChildBucket.Key(hash[:])
}

// serializeUTXODiff is left for a future implementation
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff *model.UTXODiff) []byte {
	panic("implement me")
}

// deserializeUTXODiff is left for a future implementation
func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (*model.UTXODiff, error) {
	panic("implement me")
}

// serializeUTXODiffChild is left for a future implementation
func (uds *utxoDiffStore) serializeUTXODiffChild(utxoDiffChild *externalapi.DomainHash) []byte {
	panic("implement me")
}

// deserializeUTXODiffChild is left for a future implementation
func (uds *utxoDiffStore) deserializeUTXODiffChild(utxoDiffChildBytes []byte) (*externalapi.DomainHash, error) {
	panic("implement me")
}

View File

@ -7,4 +7,5 @@ type Multiset interface {
Add(data []byte)
Remove(data []byte)
Hash() *externalapi.DomainHash
Serialize() []byte
}