Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-03-30 15:08:33 +00:00)
[NOD-1566] Add caching to all stores (#1152)
* [NOD-1566] Add a dependency to golang-lru.
* [NOD-1566] Add caching to blockstore.go.
* [NOD-1566] Add LRUCache to all store objects and initialize them.
* [NOD-1566] Add caching to acceptanceDataStore.
* [NOD-1566] Add caching to blockHeaderStore.
* [NOD-1566] Implement a simpler LRU cache.
* [NOD-1566] Use the simpler cache implementation everywhere.
* [NOD-1566] Remove dependency in golang-lru.
* [NOD-1566] Fix object reuse issues in store Get functions.
* [NOD-1566] Add caching to blockRelationStore.
* [NOD-1566] Add caching to blockStatusStore.
* [NOD-1566] Add caching to ghostdagDataStore.
* [NOD-1566] Add caching to multisetStore.
* [NOD-1566] Add caching to reachabilityDataStore.
* [NOD-1566] Add caching to utxoDiffStore.
* [NOD-1566] Add caching to reachabilityReindexRoot.
* [NOD-1566] Add caching to pruningStore.
* [NOD-1566] Add caching to headerTipsStore.
* [NOD-1566] Add caching to consensusStateStore.
* [NOD-1566] Add comments explaining why we don't discard staging at the normal location in consensusStateStore.
* [NOD-1566] Make go vet happy.
* [NOD-1566] Fix merge errors.
* [NOD-1566] Add a missing break statement.
* [NOD-1566] Run go mod tidy.
* [NOD-1566] Remove serializedUTXOSetCache.
parent: 5b2fae0457
commit: 0fa13357c3
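The change gives every store the same three-level read path: uncommitted staging data first, then an in-memory cache, and only then the database, with the cache populated on a database hit. A minimal, self-contained sketch of that lookup order (the toyStore, value and db names below are illustrative stand-ins, not the actual kaspad types):

package main

import "fmt"

// value stands in for any store entry (e.g. AcceptanceData). Clone returns a
// copy so callers never receive the object the store holds internally.
type value struct{ data string }

func (v *value) Clone() *value {
	clone := *v
	return &clone
}

// toyStore mirrors the read path this change gives every store:
// staging (uncommitted writes) -> cache -> database.
type toyStore struct {
	staging map[string]*value
	cache   map[string]*value // stands in for the real lrucache.LRUCache
	db      map[string][]byte // stands in for the model.DBReader
}

func (s *toyStore) Get(key string) (*value, error) {
	if v, ok := s.staging[key]; ok {
		return v.Clone(), nil // return a clone, never the stored object
	}
	if v, ok := s.cache[key]; ok {
		return v.Clone(), nil
	}
	serialized, ok := s.db[key]
	if !ok {
		return nil, fmt.Errorf("%s not found", key)
	}
	v := &value{data: string(serialized)} // "deserialize"
	s.cache[key] = v                      // populate the cache on a database hit
	return v.Clone(), nil
}

func main() {
	store := &toyStore{
		staging: map[string]*value{},
		cache:   map[string]*value{},
		db:      map[string][]byte{"block1": []byte("acceptance data")},
	}
	got, _ := store.Get("block1") // first read hits the database and fills the cache
	fmt.Println(got.data, len(store.cache))
}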
@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"google.golang.org/protobuf/proto"
)

@@ -14,13 +15,15 @@ var bucket = dbkeys.MakeBucket([]byte("acceptance-data"))
type acceptanceDataStore struct {
staging map[externalapi.DomainHash]model.AcceptanceData
toDelete map[externalapi.DomainHash]struct{}
cache *lrucache.LRUCache
}
// New instantiates a new AcceptanceDataStore
func New() model.AcceptanceDataStore {
func New(cacheSize int) model.AcceptanceDataStore {
return &acceptanceDataStore{
staging: make(map[externalapi.DomainHash]model.AcceptanceData),
toDelete: make(map[externalapi.DomainHash]struct{}),
cache: lrucache.New(cacheSize),
}
}

@@ -48,6 +51,7 @@ func (ads *acceptanceDataStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
ads.cache.Add(&hash, acceptanceData)
}
for hash := range ads.toDelete {

@@ -55,6 +59,7 @@ func (ads *acceptanceDataStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
ads.cache.Remove(&hash)
}
ads.Discard()

@@ -64,7 +69,11 @@ func (ads *acceptanceDataStore) Commit(dbTx model.DBTransaction) error {
// Get gets the acceptanceData associated with the given blockHash
func (ads *acceptanceDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.AcceptanceData, error) {
if acceptanceData, ok := ads.staging[*blockHash]; ok {
return acceptanceData, nil
return acceptanceData.Clone(), nil
}
if acceptanceData, ok := ads.cache.Get(blockHash); ok {
return acceptanceData.(model.AcceptanceData).Clone(), nil
}
acceptanceDataBytes, err := dbContext.Get(ads.hashAsKey(blockHash))

@@ -72,7 +81,12 @@ func (ads *acceptanceDataStore) Get(dbContext model.DBReader, blockHash *externa
return nil, err
}
return ads.deserializeAcceptanceData(acceptanceDataBytes)
acceptanceData, err := ads.deserializeAcceptanceData(acceptanceDataBytes)
if err != nil {
return nil, err
}
ads.cache.Add(blockHash, acceptanceData)
return acceptanceData.Clone(), nil
}
// Delete deletes the acceptanceData associated with the given blockHash
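Note the switch above from returning the stored object (return acceptanceData, nil) to returning a copy (return acceptanceData.Clone(), nil); the commit message calls this "Fix object reuse issues in store Get functions". A small, hypothetical illustration of the hazard the Clone-on-read pattern avoids (the header type here is made up for the example, not a kaspad type):

package main

import "fmt"

// header is a made-up type standing in for a cached store entry.
type header struct{ nonce uint64 }

func (h *header) Clone() *header {
	clone := *h
	return &clone
}

func main() {
	cached := &header{nonce: 1}

	// Without cloning, a caller that mutates the returned object silently
	// corrupts the copy the cache holds, poisoning every later read.
	leaked := cached
	leaked.nonce = 99
	fmt.Println(cached.nonce) // 99 - the cached object was changed

	// With the Clone-on-read pattern adopted in this commit, mutations stay local.
	cached = &header{nonce: 1}
	local := cached.Clone()
	local.nonce = 99
	fmt.Println(cached.nonce) // 1 - the cached object is intact
}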
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-headers"))

@@ -15,14 +16,16 @@ var countKey = dbkeys.MakeBucket().Key([]byte("block-headers-count"))
type blockHeaderStore struct {
staging map[externalapi.DomainHash]*externalapi.DomainBlockHeader
toDelete map[externalapi.DomainHash]struct{}
cache *lrucache.LRUCache
count uint64
}
// New instantiates a new BlockHeaderStore
func New(dbContext model.DBReader) (model.BlockHeaderStore, error) {
func New(dbContext model.DBReader, cacheSize int) (model.BlockHeaderStore, error) {
blockHeaderStore := &blockHeaderStore{
staging: make(map[externalapi.DomainHash]*externalapi.DomainBlockHeader),
toDelete: make(map[externalapi.DomainHash]struct{}),
cache: lrucache.New(cacheSize),
}
err := blockHeaderStore.initializeCount(dbContext)

@@ -77,6 +80,7 @@ func (bhs *blockHeaderStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
bhs.cache.Add(&hash, header)
}
for hash := range bhs.toDelete {

@@ -84,6 +88,7 @@ func (bhs *blockHeaderStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
bhs.cache.Remove(&hash)
}
err := bhs.commitCount(dbTx)

@@ -98,7 +103,11 @@ func (bhs *blockHeaderStore) Commit(dbTx model.DBTransaction) error {
// BlockHeader gets the block header associated with the given blockHash
func (bhs *blockHeaderStore) BlockHeader(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainBlockHeader, error) {
if header, ok := bhs.staging[*blockHash]; ok {
return header, nil
return header.Clone(), nil
}
if header, ok := bhs.cache.Get(blockHash); ok {
return header.(*externalapi.DomainBlockHeader).Clone(), nil
}
headerBytes, err := dbContext.Get(bhs.hashAsKey(blockHash))

@@ -106,7 +115,12 @@ func (bhs *blockHeaderStore) BlockHeader(dbContext model.DBReader, blockHash *ex
return nil, err
}
return bhs.deserializeHeader(headerBytes)
header, err := bhs.deserializeHeader(headerBytes)
if err != nil {
return nil, err
}
bhs.cache.Add(blockHash, header)
return header.Clone(), nil
}
// HasBlock returns whether a block header with a given hash exists in the store.

@@ -115,6 +129,10 @@ func (bhs *blockHeaderStore) HasBlockHeader(dbContext model.DBReader, blockHash
return true, nil
}
if bhs.cache.Has(blockHash) {
return true, nil
}
exists, err := dbContext.Has(bhs.hashAsKey(blockHash))
if err != nil {
return false, err
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-relations"))

@@ -13,12 +14,14 @@ var bucket = dbkeys.MakeBucket([]byte("block-relations"))
// blockRelationStore represents a store of BlockRelations
type blockRelationStore struct {
staging map[externalapi.DomainHash]*model.BlockRelations
cache *lrucache.LRUCache
}
// New instantiates a new BlockRelationStore
func New() model.BlockRelationStore {
func New(cacheSize int) model.BlockRelationStore {
return &blockRelationStore{
staging: make(map[externalapi.DomainHash]*model.BlockRelations),
cache: lrucache.New(cacheSize),
}
}

@@ -44,6 +47,7 @@ func (brs *blockRelationStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
brs.cache.Add(&hash, blockRelations)
}
brs.Discard()

@@ -52,7 +56,11 @@ func (brs *blockRelationStore) Commit(dbTx model.DBTransaction) error {
func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockRelations, error) {
if blockRelations, ok := brs.staging[*blockHash]; ok {
return blockRelations, nil
return blockRelations.Clone(), nil
}
if blockRelations, ok := brs.cache.Get(blockHash); ok {
return blockRelations.(*model.BlockRelations).Clone(), nil
}
blockRelationsBytes, err := dbContext.Get(brs.hashAsKey(blockHash))

@@ -60,7 +68,12 @@ func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, blockHash
return nil, err
}
return brs.deserializeBlockRelations(blockRelationsBytes)
blockRelations, err := brs.deserializeBlockRelations(blockRelationsBytes)
if err != nil {
return nil, err
}
brs.cache.Add(blockHash, blockRelations)
return blockRelations.Clone(), nil
}
func (brs *blockRelationStore) Has(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {

@@ -68,6 +81,10 @@ func (brs *blockRelationStore) Has(dbContext model.DBReader, blockHash *external
return true, nil
}
if brs.cache.Has(blockHash) {
return true, nil
}
return dbContext.Has(brs.hashAsKey(blockHash))
}
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-statuses"))

@@ -13,12 +14,14 @@ var bucket = dbkeys.MakeBucket([]byte("block-statuses"))
// blockStatusStore represents a store of BlockStatuses
type blockStatusStore struct {
staging map[externalapi.DomainHash]externalapi.BlockStatus
cache *lrucache.LRUCache
}
// New instantiates a new BlockStatusStore
func New() model.BlockStatusStore {
func New(cacheSize int) model.BlockStatusStore {
return &blockStatusStore{
staging: make(map[externalapi.DomainHash]externalapi.BlockStatus),
cache: lrucache.New(cacheSize),
}
}

@@ -45,6 +48,7 @@ func (bss *blockStatusStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
bss.cache.Add(&hash, status)
}
bss.Discard()

@@ -57,12 +61,21 @@ func (bss *blockStatusStore) Get(dbContext model.DBReader, blockHash *externalap
return status, nil
}
if status, ok := bss.cache.Get(blockHash); ok {
return status.(externalapi.BlockStatus), nil
}
statusBytes, err := dbContext.Get(bss.hashAsKey(blockHash))
if err != nil {
return 0, err
}
return bss.deserializeBlockStatus(statusBytes)
status, err := bss.deserializeBlockStatus(statusBytes)
if err != nil {
return 0, err
}
bss.cache.Add(blockHash, status)
return status, nil
}
// Exists returns true if the blockStatus for the given blockHash exists

@@ -71,6 +84,10 @@ func (bss *blockStatusStore) Exists(dbContext model.DBReader, blockHash *externa
return true, nil
}
if bss.cache.Has(blockHash) {
return true, nil
}
exists, err := dbContext.Has(bss.hashAsKey(blockHash))
if err != nil {
return false, err
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("blocks"))

@@ -15,14 +16,16 @@ var countKey = dbkeys.MakeBucket().Key([]byte("blocks-count"))
type blockStore struct {
staging map[externalapi.DomainHash]*externalapi.DomainBlock
toDelete map[externalapi.DomainHash]struct{}
cache *lrucache.LRUCache
count uint64
}
// New instantiates a new BlockStore
func New(dbContext model.DBReader) (model.BlockStore, error) {
func New(dbContext model.DBReader, cacheSize int) (model.BlockStore, error) {
blockStore := &blockStore{
staging: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
toDelete: make(map[externalapi.DomainHash]struct{}),
cache: lrucache.New(cacheSize),
}
err := blockStore.initializeCount(dbContext)

@@ -77,6 +80,7 @@ func (bs *blockStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
bs.cache.Add(&hash, block)
}
for hash := range bs.toDelete {

@@ -84,6 +88,7 @@ func (bs *blockStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
bs.cache.Remove(&hash)
}
err := bs.commitCount(dbTx)

@@ -98,7 +103,11 @@ func (bs *blockStore) Commit(dbTx model.DBTransaction) error {
// Block gets the block associated with the given blockHash
func (bs *blockStore) Block(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
if block, ok := bs.staging[*blockHash]; ok {
return block, nil
return block.Clone(), nil
}
if block, ok := bs.cache.Get(blockHash); ok {
return block.(*externalapi.DomainBlock).Clone(), nil
}
blockBytes, err := dbContext.Get(bs.hashAsKey(blockHash))

@@ -106,7 +115,12 @@ func (bs *blockStore) Block(dbContext model.DBReader, blockHash *externalapi.Dom
return nil, err
}
return bs.deserializeBlock(blockBytes)
block, err := bs.deserializeBlock(blockBytes)
if err != nil {
return nil, err
}
bs.cache.Add(blockHash, block)
return block.Clone(), nil
}
// HasBlock returns whether a block with a given hash exists in the store.

@@ -115,6 +129,10 @@ func (bs *blockStore) HasBlock(dbContext model.DBReader, blockHash *externalapi.
return true, nil
}
if bs.cache.Has(blockHash) {
return true, nil
}
exists, err := dbContext.Has(bs.hashAsKey(blockHash))
if err != nil {
return false, err
@@ -7,10 +7,13 @@ import (
// consensusStateStore represents a store for the current consensus state
type consensusStateStore struct {
stagedTips []*externalapi.DomainHash
stagedVirtualDiffParents []*externalapi.DomainHash
stagedVirtualUTXODiff *model.UTXODiff
stagedVirtualUTXOSet model.UTXOCollection
tipsStaging []*externalapi.DomainHash
virtualDiffParentsStaging []*externalapi.DomainHash
virtualUTXODiffStaging *model.UTXODiff
virtualUTXOSetStaging model.UTXOCollection
tipsCache []*externalapi.DomainHash
virtualDiffParentsCache []*externalapi.DomainHash
}
// New instantiates a new ConsensusStateStore

@@ -18,40 +21,40 @@ func New() model.ConsensusStateStore {
return &consensusStateStore{}
}
func (c *consensusStateStore) Discard() {
c.stagedTips = nil
c.stagedVirtualUTXODiff = nil
c.stagedVirtualDiffParents = nil
c.stagedVirtualUTXOSet = nil
func (css *consensusStateStore) Discard() {
css.tipsStaging = nil
css.virtualUTXODiffStaging = nil
css.virtualDiffParentsStaging = nil
css.virtualUTXOSetStaging = nil
}
func (c *consensusStateStore) Commit(dbTx model.DBTransaction) error {
err := c.commitTips(dbTx)
func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
err := css.commitTips(dbTx)
if err != nil {
return err
}
err = c.commitVirtualDiffParents(dbTx)
err = css.commitVirtualDiffParents(dbTx)
if err != nil {
return err
}
err = c.commitVirtualUTXODiff(dbTx)
err = css.commitVirtualUTXODiff(dbTx)
if err != nil {
return err
}
err = c.commitVirtualUTXOSet(dbTx)
err = css.commitVirtualUTXOSet(dbTx)
if err != nil {
return err
}
c.Discard()
css.Discard()
return nil
}
func (c *consensusStateStore) IsStaged() bool {
return c.stagedTips != nil ||
c.stagedVirtualDiffParents != nil ||
c.stagedVirtualUTXODiff != nil
func (css *consensusStateStore) IsStaged() bool {
return css.tipsStaging != nil ||
css.virtualDiffParentsStaging != nil ||
css.virtualUTXODiffStaging != nil
}
@@ -10,9 +10,13 @@ import (
var tipsKey = dbkeys.MakeBucket().Key([]byte("tips"))
func (c *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if c.stagedTips != nil {
return c.stagedTips, nil
func (css *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.tipsStaging != nil {
return externalapi.CloneHashes(css.tipsStaging), nil
}
if css.tipsCache != nil {
return externalapi.CloneHashes(css.tipsCache), nil
}
tipsBytes, err := dbContext.Get(tipsKey)

@@ -20,37 +24,44 @@ func (c *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.Dom
return nil, err
}
return c.deserializeTips(tipsBytes)
tips, err := css.deserializeTips(tipsBytes)
if err != nil {
return nil, err
}
css.tipsCache = tips
return externalapi.CloneHashes(tips), nil
}
func (c *consensusStateStore) StageTips(tipHashes []*externalapi.DomainHash) {
c.stagedTips = externalapi.CloneHashes(tipHashes)
func (css *consensusStateStore) StageTips(tipHashes []*externalapi.DomainHash) {
css.tipsStaging = externalapi.CloneHashes(tipHashes)
}
func (c *consensusStateStore) commitTips(dbTx model.DBTransaction) error {
if c.stagedTips == nil {
func (css *consensusStateStore) commitTips(dbTx model.DBTransaction) error {
if css.tipsStaging == nil {
return nil
}
tipsBytes, err := c.serializeTips(c.stagedTips)
tipsBytes, err := css.serializeTips(css.tipsStaging)
if err != nil {
return err
}
err = dbTx.Put(tipsKey, tipsBytes)
if err != nil {
return err
}
css.tipsCache = css.tipsStaging
// Note: we don't discard the staging here since that's
// being done at the end of Commit()
return nil
}
func (c *consensusStateStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
func (css *consensusStateStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
dbTips := serialization.TipsToDBTips(tips)
return proto.Marshal(dbTips)
}
func (c *consensusStateStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash,
func (css *consensusStateStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash,
error) {
dbTips := &serialization.DbTips{}
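For slice-valued state such as the tips, the consensus state store keeps a plain field (tipsCache) rather than an LRU cache, assigns it on commit or on a database read, and hands out externalapi.CloneHashes copies so callers cannot mutate the cached slice in place. A simplified sketch of that shape, with a shallow cloneHashes stand-in for the real CloneHashes helper:

package main

import "fmt"

type hash [32]byte

// cloneHashes plays the role of externalapi.CloneHashes in this sketch: copy
// the slice so callers cannot reorder or overwrite the cached tips in place
// (the real helper may copy more deeply).
func cloneHashes(hashes []*hash) []*hash {
	clone := make([]*hash, len(hashes))
	copy(clone, hashes)
	return clone
}

// tipsStore mimics the staging -> cache fallback used for the tips above.
type tipsStore struct {
	staging []*hash
	cache   []*hash
}

func (ts *tipsStore) Tips() []*hash {
	if ts.staging != nil {
		return cloneHashes(ts.staging)
	}
	if ts.cache != nil {
		return cloneHashes(ts.cache)
	}
	return nil // the real store falls through to the database here
}

func main() {
	ts := &tipsStore{cache: []*hash{{1}, {2}}}
	tips := ts.Tips()
	tips[0] = &hash{9}          // mutating the returned slice...
	fmt.Println(ts.cache[0][0]) // ...leaves the cached slice untouched: prints 1
}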
@@ -18,21 +18,21 @@ func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
return utxoSetBucket.Key(serializedOutpoint), nil
}
func (c *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff *model.UTXODiff) error {
if c.stagedVirtualUTXOSet != nil {
func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff *model.UTXODiff) error {
if css.virtualUTXOSetStaging != nil {
return errors.New("cannot stage virtual UTXO diff while virtual UTXO set is staged")
}
c.stagedVirtualUTXODiff = virtualUTXODiff.Clone()
css.virtualUTXODiffStaging = virtualUTXODiff.Clone()
return nil
}
func (c *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
if c.stagedVirtualUTXODiff == nil {
func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
if css.virtualUTXODiffStaging == nil {
return nil
}
for toRemoveOutpoint := range c.stagedVirtualUTXODiff.ToRemove {
for toRemoveOutpoint := range css.virtualUTXODiffStaging.ToRemove {
dbKey, err := utxoKey(&toRemoveOutpoint)
if err != nil {
return err

@@ -43,7 +43,7 @@ func (c *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) er
}
}
for toAddOutpoint, toAddEntry := range c.stagedVirtualUTXODiff.ToAdd {
for toAddOutpoint, toAddEntry := range css.virtualUTXODiffStaging.ToAdd {
dbKey, err := utxoKey(&toAddOutpoint)
if err != nil {
return err

@@ -58,15 +58,17 @@ func (c *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) er
}
}
// Note: we don't discard the staging here since that's
// being done at the end of Commit()
return nil
}
func (c *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) error {
if c.stagedVirtualUTXOSet == nil {
func (css *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) error {
if css.virtualUTXOSetStaging == nil {
return nil
}
for outpoint, utxoEntry := range c.stagedVirtualUTXOSet {
for outpoint, utxoEntry := range css.virtualUTXOSetStaging {
dbKey, err := utxoKey(&outpoint)
if err != nil {
return err

@@ -81,28 +83,30 @@ func (c *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) err
}
}
// Note: we don't discard the staging here since that's
// being done at the end of Commit()
return nil
}
func (c *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (
func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (
*externalapi.UTXOEntry, error) {
if c.stagedVirtualUTXOSet != nil {
return c.utxoByOutpointFromStagedVirtualUTXOSet(outpoint)
if css.virtualUTXOSetStaging != nil {
return css.utxoByOutpointFromStagedVirtualUTXOSet(outpoint)
}
return c.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
return css.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
}
func (c *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
outpoint *externalapi.DomainOutpoint) (
*externalapi.UTXOEntry, error) {
if c.stagedVirtualUTXODiff != nil {
if _, ok := c.stagedVirtualUTXODiff.ToRemove[*outpoint]; ok {
if css.virtualUTXODiffStaging != nil {
if _, ok := css.virtualUTXODiffStaging.ToRemove[*outpoint]; ok {
return nil, errors.Errorf("outpoint was not found")
}
if utxoEntry, ok := c.stagedVirtualUTXODiff.ToAdd[*outpoint]; ok {
if utxoEntry, ok := css.virtualUTXODiffStaging.ToAdd[*outpoint]; ok {
return utxoEntry, nil
}
}

@@ -120,31 +124,31 @@ func (c *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext
return deserializeUTXOEntry(serializedUTXOEntry)
}
func (c *consensusStateStore) utxoByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) (
func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) (
*externalapi.UTXOEntry, error) {
if utxoEntry, ok := c.stagedVirtualUTXOSet[*outpoint]; ok {
if utxoEntry, ok := css.virtualUTXOSetStaging[*outpoint]; ok {
return utxoEntry, nil
}
return nil, errors.Errorf("outpoint was not found")
}
func (c *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (bool, error) {
if c.stagedVirtualUTXOSet != nil {
return c.hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint), nil
func (css *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (bool, error) {
if css.virtualUTXOSetStaging != nil {
return css.hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint), nil
}
return c.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
return css.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
}
func (c *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
outpoint *externalapi.DomainOutpoint) (bool, error) {
if c.stagedVirtualUTXODiff != nil {
if _, ok := c.stagedVirtualUTXODiff.ToRemove[*outpoint]; ok {
if css.virtualUTXODiffStaging != nil {
if _, ok := css.virtualUTXODiffStaging.ToRemove[*outpoint]; ok {
return false, nil
}
if _, ok := c.stagedVirtualUTXODiff.ToAdd[*outpoint]; ok {
if _, ok := css.virtualUTXODiffStaging.ToAdd[*outpoint]; ok {
return true, nil
}
}

@@ -157,12 +161,12 @@ func (c *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbConte
return dbContext.Has(key)
}
func (c *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) bool {
_, ok := c.stagedVirtualUTXOSet[*outpoint]
func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) bool {
_, ok := css.virtualUTXOSetStaging[*outpoint]
return ok
}
func (c *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader) (model.ReadOnlyUTXOSetIterator, error) {
func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader) (model.ReadOnlyUTXOSetIterator, error) {
cursor, err := dbContext.Cursor(utxoSetBucket)
if err != nil {
return nil, err

@@ -207,22 +211,22 @@ func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry
return outpoint, utxoEntry, nil
}
func (c *consensusStateStore) StageVirtualUTXOSet(virtualUTXOSetIterator model.ReadOnlyUTXOSetIterator) error {
if c.stagedVirtualUTXODiff != nil {
func (css *consensusStateStore) StageVirtualUTXOSet(virtualUTXOSetIterator model.ReadOnlyUTXOSetIterator) error {
if css.virtualUTXODiffStaging != nil {
return errors.New("cannot stage virtual UTXO set while virtual UTXO diff is staged")
}
c.stagedVirtualUTXOSet = make(model.UTXOCollection)
css.virtualUTXOSetStaging = make(model.UTXOCollection)
for virtualUTXOSetIterator.Next() {
outpoint, entry, err := virtualUTXOSetIterator.Get()
if err != nil {
return err
}
if _, exists := c.stagedVirtualUTXOSet[*outpoint]; exists {
if _, exists := css.virtualUTXOSetStaging[*outpoint]; exists {
return errors.Errorf("outpoint %s is found more than once in the given iterator", outpoint)
}
c.stagedVirtualUTXOSet[*outpoint] = entry
css.virtualUTXOSetStaging[*outpoint] = entry
}
return nil
@@ -10,9 +10,13 @@ import (
var virtualDiffParentsKey = dbkeys.MakeBucket().Key([]byte("virtual-diff-parents"))
func (c *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if c.stagedVirtualDiffParents != nil {
return c.stagedVirtualDiffParents, nil
func (css *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.virtualDiffParentsStaging != nil {
return externalapi.CloneHashes(css.virtualDiffParentsStaging), nil
}
if css.virtualDiffParentsCache != nil {
return externalapi.CloneHashes(css.virtualDiffParentsCache), nil
}
virtualDiffParentsBytes, err := dbContext.Get(virtualDiffParentsKey)

@@ -20,37 +24,44 @@ func (c *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*e
return nil, err
}
return c.deserializeVirtualDiffParents(virtualDiffParentsBytes)
virtualDiffParents, err := css.deserializeVirtualDiffParents(virtualDiffParentsBytes)
if err != nil {
return nil, err
}
css.virtualDiffParentsCache = virtualDiffParents
return externalapi.CloneHashes(virtualDiffParents), nil
}
func (c *consensusStateStore) StageVirtualDiffParents(tipHashes []*externalapi.DomainHash) {
c.stagedVirtualDiffParents = externalapi.CloneHashes(tipHashes)
func (css *consensusStateStore) StageVirtualDiffParents(tipHashes []*externalapi.DomainHash) {
css.virtualDiffParentsStaging = externalapi.CloneHashes(tipHashes)
}
func (c *consensusStateStore) commitVirtualDiffParents(dbTx model.DBTransaction) error {
if c.stagedVirtualDiffParents == nil {
func (css *consensusStateStore) commitVirtualDiffParents(dbTx model.DBTransaction) error {
if css.virtualDiffParentsStaging == nil {
return nil
}
virtualDiffParentsBytes, err := c.serializeVirtualDiffParents(c.stagedVirtualDiffParents)
virtualDiffParentsBytes, err := css.serializeVirtualDiffParents(css.virtualDiffParentsStaging)
if err != nil {
return err
}
err = dbTx.Put(virtualDiffParentsKey, virtualDiffParentsBytes)
if err != nil {
return err
}
css.virtualDiffParentsCache = css.virtualDiffParentsStaging
// Note: we don't discard the staging here since that's
// being done at the end of Commit()
return nil
}
func (c *consensusStateStore) serializeVirtualDiffParents(virtualDiffParentsBytes []*externalapi.DomainHash) ([]byte, error) {
func (css *consensusStateStore) serializeVirtualDiffParents(virtualDiffParentsBytes []*externalapi.DomainHash) ([]byte, error) {
virtualDiffParents := serialization.VirtualDiffParentsToDBHeaderVirtualDiffParents(virtualDiffParentsBytes)
return proto.Marshal(virtualDiffParents)
}
func (c *consensusStateStore) deserializeVirtualDiffParents(virtualDiffParentsBytes []byte) ([]*externalapi.DomainHash,
func (css *consensusStateStore) deserializeVirtualDiffParents(virtualDiffParentsBytes []byte) ([]*externalapi.DomainHash,
error) {
dbVirtualDiffParents := &serialization.DbVirtualDiffParents{}
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))

@@ -13,12 +14,14 @@ var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))
// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {
staging map[externalapi.DomainHash]*model.BlockGHOSTDAGData
cache *lrucache.LRUCache
}
// New instantiates a new GHOSTDAGDataStore
func New() model.GHOSTDAGDataStore {
func New(cacheSize int) model.GHOSTDAGDataStore {
return &ghostdagDataStore{
staging: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
cache: lrucache.New(cacheSize),
}
}

@@ -41,11 +44,11 @@ func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(gds.hashAsKey(&hash), blockGhostdagDataBytes)
if err != nil {
return err
}
gds.cache.Add(&hash, blockGHOSTDAGData)
}
gds.Discard()

@@ -55,7 +58,11 @@ func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
// Get gets the blockGHOSTDAGData associated with the given blockHash
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
if blockGHOSTDAGData, ok := gds.staging[*blockHash]; ok {
return blockGHOSTDAGData, nil
return blockGHOSTDAGData.Clone(), nil
}
if blockGHOSTDAGData, ok := gds.cache.Get(blockHash); ok {
return blockGHOSTDAGData.(*model.BlockGHOSTDAGData).Clone(), nil
}
blockGHOSTDAGDataBytes, err := dbContext.Get(gds.hashAsKey(blockHash))

@@ -63,7 +70,12 @@ func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externala
return nil, err
}
return gds.deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes)
blockGHOSTDAGData, err := gds.deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes)
if err != nil {
return nil, err
}
gds.cache.Add(blockHash, blockGHOSTDAGData)
return blockGHOSTDAGData.Clone(), nil
}
func (gds *ghostdagDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
@@ -12,50 +12,64 @@ var headerTipsKey = dbkeys.MakeBucket().Key([]byte("header-tips"))
type headerTipsStore struct {
staging []*externalapi.DomainHash
cache []*externalapi.DomainHash
}
func (h *headerTipsStore) HasTips(dbContext model.DBReader) (bool, error) {
if h.staging != nil {
return len(h.staging) > 0, nil
// New instantiates a new HeaderTipsStore
func New() model.HeaderTipsStore {
return &headerTipsStore{}
}
func (hts *headerTipsStore) HasTips(dbContext model.DBReader) (bool, error) {
if len(hts.staging) > 0 {
return true, nil
}
if len(hts.cache) > 0 {
return true, nil
}
return dbContext.Has(headerTipsKey)
}
func (h *headerTipsStore) Discard() {
h.staging = nil
func (hts *headerTipsStore) Discard() {
hts.staging = nil
}
func (h *headerTipsStore) Commit(dbTx model.DBTransaction) error {
if h.staging == nil {
func (hts *headerTipsStore) Commit(dbTx model.DBTransaction) error {
if hts.staging == nil {
return nil
}
tipsBytes, err := h.serializeTips(h.staging)
tipsBytes, err := hts.serializeTips(hts.staging)
if err != nil {
return err
}
err = dbTx.Put(headerTipsKey, tipsBytes)
if err != nil {
return err
}
hts.cache = hts.staging
h.Discard()
hts.Discard()
return nil
}
func (h *headerTipsStore) Stage(tips []*externalapi.DomainHash) {
h.staging = externalapi.CloneHashes(tips)
func (hts *headerTipsStore) Stage(tips []*externalapi.DomainHash) {
hts.staging = externalapi.CloneHashes(tips)
}
func (h *headerTipsStore) IsStaged() bool {
return h.staging != nil
func (hts *headerTipsStore) IsStaged() bool {
return hts.staging != nil
}
func (h *headerTipsStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if h.staging != nil {
return h.staging, nil
func (hts *headerTipsStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if hts.staging != nil {
return externalapi.CloneHashes(hts.staging), nil
}
if hts.cache != nil {
return externalapi.CloneHashes(hts.cache), nil
}
tipsBytes, err := dbContext.Get(headerTipsKey)

@@ -63,15 +77,20 @@ func (h *headerTipsStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainH
return nil, err
}
return h.deserializeTips(tipsBytes)
tips, err := hts.deserializeTips(tipsBytes)
if err != nil {
return nil, err
}
hts.cache = tips
return externalapi.CloneHashes(tips), nil
}
func (h *headerTipsStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
func (hts *headerTipsStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
dbTips := serialization.HeaderTipsToDBHeaderTips(tips)
return proto.Marshal(dbTips)
}
func (h *headerTipsStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash, error) {
func (hts *headerTipsStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash, error) {
dbTips := &serialization.DbHeaderTips{}
err := proto.Unmarshal(tipsBytes, dbTips)
if err != nil {

@@ -80,8 +99,3 @@ func (h *headerTipsStore) deserializeTips(tipsBytes []byte) ([]*externalapi.Doma
return serialization.DBHeaderTipsToHeaderTips(dbTips)
}
// New instantiates a new HeaderTipsStore
func New() model.HeaderTipsStore {
return &headerTipsStore{}
}
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("multisets"))

@@ -14,13 +15,15 @@ var bucket = dbkeys.MakeBucket([]byte("multisets"))
type multisetStore struct {
staging map[externalapi.DomainHash]model.Multiset
toDelete map[externalapi.DomainHash]struct{}
cache *lrucache.LRUCache
}
// New instantiates a new MultisetStore
func New() model.MultisetStore {
func New(cacheSize int) model.MultisetStore {
return &multisetStore{
staging: make(map[externalapi.DomainHash]model.Multiset),
toDelete: make(map[externalapi.DomainHash]struct{}),
cache: lrucache.New(cacheSize),
}
}

@@ -44,11 +47,11 @@ func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(ms.hashAsKey(&hash), multisetBytes)
if err != nil {
return err
}
ms.cache.Add(&hash, multiset)
}
for hash := range ms.toDelete {

@@ -56,6 +59,7 @@ func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
ms.cache.Remove(&hash)
}
ms.Discard()

@@ -68,12 +72,21 @@ func (ms *multisetStore) Get(dbContext model.DBReader, blockHash *externalapi.Do
return multiset.Clone(), nil
}
if multiset, ok := ms.cache.Get(blockHash); ok {
return multiset.(model.Multiset).Clone(), nil
}
multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash))
if err != nil {
return nil, err
}
return ms.deserializeMultiset(multisetBytes)
multiset, err := ms.deserializeMultiset(multisetBytes)
if err != nil {
return nil, err
}
ms.cache.Add(blockHash, multiset)
return multiset.Clone(), nil
}
// Delete deletes the multiset associated with the given blockHash
@@ -15,14 +15,12 @@ var pruningSerializedUTXOSetkey = dbkeys.MakeBucket().Key([]byte("pruning-utxo-s
type pruningStore struct {
pruningPointStaging *externalapi.DomainHash
serializedUTXOSetStaging []byte
pruningPointCache *externalapi.DomainHash
}
// New instantiates a new PruningStore
func New() model.PruningStore {
return &pruningStore{
pruningPointStaging: nil,
serializedUTXOSetStaging: nil,
}
return &pruningStore{}
}
// Stage stages the pruning state

@@ -50,6 +48,7 @@ func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
ps.pruningPointCache = ps.pruningPointStaging
}
if ps.serializedUTXOSetStaging != nil {

@@ -57,7 +56,6 @@ func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(pruningSerializedUTXOSetkey, utxoSetBytes)
if err != nil {
return err

@@ -74,16 +72,21 @@ func (ps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.Dom
return ps.pruningPointStaging, nil
}
blockHashBytes, err := dbContext.Get(pruningBlockHashKey)
if ps.pruningPointCache != nil {
return ps.pruningPointCache, nil
}
pruningPointBytes, err := dbContext.Get(pruningBlockHashKey)
if err != nil {
return nil, err
}
blockHash, err := ps.deserializePruningPoint(blockHashBytes)
pruningPoint, err := ps.deserializePruningPoint(pruningPointBytes)
if err != nil {
return nil, err
}
return blockHash, nil
ps.pruningPointCache = pruningPoint
return pruningPoint, nil
}
// PruningPointSerializedUTXOSet returns the serialized UTXO set of the current pruning point

@@ -97,7 +100,11 @@ func (ps *pruningStore) PruningPointSerializedUTXOSet(dbContext model.DBReader)
return nil, err
}
return ps.deserializeUTXOSetBytes(dbPruningPointUTXOSetBytes)
pruningPointUTXOSet, err := ps.deserializeUTXOSetBytes(dbPruningPointUTXOSetBytes)
if err != nil {
return nil, err
}
return pruningPointUTXOSet, nil
}
func (ps *pruningStore) serializePruningPoint(pruningPoint *externalapi.DomainHash) ([]byte, error) {

@@ -133,5 +140,9 @@ func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader) (bool, error)
return true, nil
}
if ps.pruningPointCache != nil {
return true, nil
}
return dbContext.Has(pruningBlockHashKey)
}
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var reachabilityDataBucket = dbkeys.MakeBucket([]byte("reachability-data"))

@@ -15,13 +16,15 @@ var reachabilityReindexRootKey = dbkeys.MakeBucket().Key([]byte("reachability-re
type reachabilityDataStore struct {
reachabilityDataStaging map[externalapi.DomainHash]*model.ReachabilityData
reachabilityReindexRootStaging *externalapi.DomainHash
reachabilityDataCache *lrucache.LRUCache
reachabilityReindexRootCache *externalapi.DomainHash
}
// New instantiates a new ReachabilityDataStore
func New() model.ReachabilityDataStore {
func New(cacheSize int) model.ReachabilityDataStore {
return &reachabilityDataStore{
reachabilityDataStaging: make(map[externalapi.DomainHash]*model.ReachabilityData),
reachabilityReindexRootStaging: nil,
reachabilityDataStaging: make(map[externalapi.DomainHash]*model.ReachabilityData),
reachabilityDataCache: lrucache.New(cacheSize),
}
}

@@ -51,22 +54,22 @@ func (rds *reachabilityDataStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(reachabilityReindexRootKey, reachabilityReindexRootBytes)
if err != nil {
return err
}
rds.reachabilityReindexRootCache = rds.reachabilityReindexRootStaging
}
for hash, reachabilityData := range rds.reachabilityDataStaging {
reachabilityDataBytes, err := rds.serializeReachabilityData(reachabilityData)
if err != nil {
return err
}
err = dbTx.Put(rds.reachabilityDataBlockHashAsKey(&hash), reachabilityDataBytes)
if err != nil {
return err
}
rds.reachabilityDataCache.Add(&hash, reachabilityData)
}
rds.Discard()

@@ -78,7 +81,11 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader,
blockHash *externalapi.DomainHash) (*model.ReachabilityData, error) {
if reachabilityData, ok := rds.reachabilityDataStaging[*blockHash]; ok {
return reachabilityData, nil
return reachabilityData.Clone(), nil
}
if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
return reachabilityData.(*model.ReachabilityData).Clone(), nil
}
reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))

@@ -86,7 +93,12 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader,
return nil, err
}
return rds.deserializeReachabilityData(reachabilityDataBytes)
reachabilityData, err := rds.deserializeReachabilityData(reachabilityDataBytes)
if err != nil {
return nil, err
}
rds.reachabilityDataCache.Add(blockHash, reachabilityData)
return reachabilityData.Clone(), nil
}
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {

@@ -94,6 +106,10 @@ func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader,
return true, nil
}
if rds.reachabilityDataCache.Has(blockHash) {
return true, nil
}
return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
}

@@ -102,6 +118,11 @@ func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBRead
if rds.reachabilityReindexRootStaging != nil {
return rds.reachabilityReindexRootStaging, nil
}
if rds.reachabilityReindexRootCache != nil {
return rds.reachabilityReindexRootCache, nil
}
reachabilityReindexRootBytes, err := dbContext.Get(reachabilityReindexRootKey)
if err != nil {
return nil, err

@@ -111,6 +132,7 @@ func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBRead
if err != nil {
return nil, err
}
rds.reachabilityReindexRootCache = reachabilityReindexRoot
return reachabilityReindexRoot, nil
}
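Singleton values, like the reachability reindex root here or the pruning point above, get a simpler treatment than per-block data: a single cached field checked after staging and refreshed on commit or on a database read, since a bounded LRU structure is only needed when there is one entry per block. A minimal sketch of that single-value caching shape, using a hypothetical loadFromDB callback in place of the real DBReader lookup:

package main

import "fmt"

type hash [32]byte

// reindexRootStore mimics the staging/cache fields the reachability and
// pruning stores use for their singleton values.
type reindexRootStore struct {
	staging *hash // set by Stage, cleared by Discard/Commit
	cache   *hash // last committed or last loaded value
}

// Get follows the staging -> cache -> database order; loadFromDB is a
// hypothetical callback standing in for the database read.
func (s *reindexRootStore) Get(loadFromDB func() (*hash, error)) (*hash, error) {
	if s.staging != nil {
		return s.staging, nil
	}
	if s.cache != nil {
		return s.cache, nil
	}
	root, err := loadFromDB()
	if err != nil {
		return nil, err
	}
	s.cache = root
	return root, nil
}

func main() {
	store := &reindexRootStore{}
	loads := 0
	load := func() (*hash, error) {
		loads++
		return &hash{7}, nil
	}
	store.Get(load)
	store.Get(load)    // the second call is served from the cache field
	fmt.Println(loads) // 1
}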
@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/pkg/errors"
)

@@ -17,14 +18,18 @@ type utxoDiffStore struct {
utxoDiffStaging map[externalapi.DomainHash]*model.UTXODiff
utxoDiffChildStaging map[externalapi.DomainHash]*externalapi.DomainHash
toDelete map[externalapi.DomainHash]struct{}
utxoDiffCache *lrucache.LRUCache
utxoDiffChildCache *lrucache.LRUCache
}
// New instantiates a new UTXODiffStore
func New() model.UTXODiffStore {
func New(cacheSize int) model.UTXODiffStore {
return &utxoDiffStore{
utxoDiffStaging: make(map[externalapi.DomainHash]*model.UTXODiff),
utxoDiffChildStaging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
toDelete: make(map[externalapi.DomainHash]struct{}),
utxoDiffCache: lrucache.New(cacheSize),
utxoDiffChildCache: lrucache.New(cacheSize),
}
}

@@ -61,11 +66,11 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
err = dbTx.Put(uds.utxoDiffHashAsKey(&hash), utxoDiffBytes)
if err != nil {
return err
}
uds.utxoDiffCache.Add(&hash, utxoDiff)
}
for hash, utxoDiffChild := range uds.utxoDiffChildStaging {
if utxoDiffChild == nil {

@@ -80,6 +85,7 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
uds.utxoDiffChildCache.Add(&hash, utxoDiffChild)
}
for hash := range uds.toDelete {

@@ -87,11 +93,13 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}
uds.utxoDiffCache.Remove(&hash)
err = dbTx.Delete(uds.utxoDiffChildHashAsKey(&hash))
if err != nil {
return err
}
uds.utxoDiffChildCache.Remove(&hash)
}
uds.Discard()

@@ -101,7 +109,11 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
// UTXODiff gets the utxoDiff associated with the given blockHash
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.UTXODiff, error) {
if utxoDiff, ok := uds.utxoDiffStaging[*blockHash]; ok {
return utxoDiff, nil
return utxoDiff.Clone(), nil
}
if utxoDiff, ok := uds.utxoDiffCache.Get(blockHash); ok {
return utxoDiff.(*model.UTXODiff).Clone(), nil
}
utxoDiffBytes, err := dbContext.Get(uds.utxoDiffHashAsKey(blockHash))

@@ -109,13 +121,22 @@ func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *external
return nil, err
}
return uds.deserializeUTXODiff(utxoDiffBytes)
utxoDiff, err := uds.deserializeUTXODiff(utxoDiffBytes)
if err != nil {
return nil, err
}
uds.utxoDiffCache.Add(blockHash, utxoDiff)
return utxoDiff.Clone(), nil
}
// UTXODiffChild gets the utxoDiff child associated with the given blockHash
func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
if utxoDiffChild, ok := uds.utxoDiffChildStaging[*blockHash]; ok {
return utxoDiffChild, nil
return utxoDiffChild.Clone(), nil
}
if utxoDiffChild, ok := uds.utxoDiffChildCache.Get(blockHash); ok {
return utxoDiffChild.(*externalapi.DomainHash).Clone(), nil
}
utxoDiffChildBytes, err := dbContext.Get(uds.utxoDiffChildHashAsKey(blockHash))

@@ -127,7 +148,8 @@ func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, blockHash *ext
if err != nil {
return nil, err
}
return utxoDiffChild, nil
uds.utxoDiffChildCache.Add(blockHash, utxoDiffChild)
return utxoDiffChild.Clone(), nil
}
// HasUTXODiffChild returns true if the given blockHash has a UTXODiffChild

@@ -136,6 +158,10 @@ func (uds *utxoDiffStore) HasUTXODiffChild(dbContext model.DBReader, blockHash *
return true, nil
}
if uds.utxoDiffChildCache.Has(blockHash) {
return true, nil
}
return dbContext.Has(uds.utxoDiffChildHashAsKey(blockHash))
}
@@ -62,23 +62,24 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
dbManager := consensusdatabase.New(db)
// Data Structures
acceptanceDataStore := acceptancedatastore.New()
blockStore, err := blockstore.New(dbManager)
storeCacheSize := int(dagParams.FinalityDepth())
acceptanceDataStore := acceptancedatastore.New(storeCacheSize)
blockStore, err := blockstore.New(dbManager, storeCacheSize)
if err != nil {
return nil, err
}
blockHeaderStore, err := blockheaderstore.New(dbManager)
blockHeaderStore, err := blockheaderstore.New(dbManager, storeCacheSize)
if err != nil {
return nil, err
}
blockRelationStore := blockrelationstore.New()
blockStatusStore := blockstatusstore.New()
multisetStore := multisetstore.New()
blockRelationStore := blockrelationstore.New(storeCacheSize)
blockStatusStore := blockstatusstore.New(storeCacheSize)
multisetStore := multisetstore.New(storeCacheSize)
pruningStore := pruningstore.New()
reachabilityDataStore := reachabilitydatastore.New()
utxoDiffStore := utxodiffstore.New()
reachabilityDataStore := reachabilitydatastore.New(storeCacheSize)
utxoDiffStore := utxodiffstore.New(storeCacheSize)
consensusStateStore := consensusstatestore.New()
ghostdagDataStore := ghostdagdatastore.New()
ghostdagDataStore := ghostdagdatastore.New(storeCacheSize)
headerTipsStore := headertipsstore.New()
// Processes
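Every per-block cache is sized with the same storeCacheSize, taken from the DAG's finality depth; presumably blocks within the finality window are the ones consensus code revisits most often, so that depth is a natural bound. A rough sketch of the wiring (the finalityDepth value and newStore constructor below are illustrative stand-ins for dagconfig.Params.FinalityDepth() and the real store constructors):

package main

import "fmt"

// store and newStore are illustrative stand-ins for the real store packages,
// each of whose New constructors now accepts a cache size.
type store struct{ cacheSize int }

func newStore(cacheSize int) *store { return &store{cacheSize: cacheSize} }

func main() {
	// Illustrative value only; the factory uses int(dagParams.FinalityDepth()).
	finalityDepth := uint64(86400)
	storeCacheSize := int(finalityDepth)

	// One shared size for every per-block cache.
	acceptanceDataStore := newStore(storeCacheSize)
	blockStore := newStore(storeCacheSize)
	ghostdagDataStore := newStore(storeCacheSize)

	fmt.Println(acceptanceDataStore.cacheSize, blockStore.cacheSize, ghostdagDataStore.cacheSize)
}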
domain/consensus/utils/lrucache/lrucache.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package lrucache
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// LRUCache is a least-recently-used cache for any type
// that's able to be indexed by DomainHash
type LRUCache struct {
cache map[externalapi.DomainHash]interface{}
capacity int
}
// New creates a new LRUCache
func New(capacity int) *LRUCache {
return &LRUCache{
cache: make(map[externalapi.DomainHash]interface{}, capacity+1),
capacity: capacity,
}
}
// Add adds an entry to the LRUCache
func (c *LRUCache) Add(key *externalapi.DomainHash, value interface{}) {
c.cache[*key] = value
if len(c.cache) > c.capacity {
c.evictRandom()
}
}
// Get returns the entry for the given key, or (nil, false) otherwise
func (c *LRUCache) Get(key *externalapi.DomainHash) (interface{}, bool) {
value, ok := c.cache[*key]
if !ok {
return nil, false
}
return value, true
}
// Has returns whether the LRUCache contains the given key
func (c *LRUCache) Has(key *externalapi.DomainHash) bool {
_, ok := c.cache[*key]
return ok
}
// Remove removes the entry for the the given key. Does nothing if
// the entry does not exist
func (c *LRUCache) Remove(key *externalapi.DomainHash) {
delete(c.cache, *key)
}
func (c *LRUCache) evictRandom() {
var keyToEvict externalapi.DomainHash
for key := range c.cache {
keyToEvict = key
break
}
c.Remove(&keyToEvict)
}
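Despite its name, the cache added above evicts an arbitrary entry (whatever Go's map iteration yields first) once it grows past capacity, rather than tracking recency; the commit message describes it as "a simpler LRU cache". A short usage sketch, assuming externalapi.DomainHash is the fixed-size byte-array type it was at this point in the repository's history:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)

func main() {
	cache := lrucache.New(2) // room for two entries

	blockA := externalapi.DomainHash{1}
	blockB := externalapi.DomainHash{2}
	blockC := externalapi.DomainHash{3}

	cache.Add(&blockA, "data for A")
	cache.Add(&blockB, "data for B")

	if value, ok := cache.Get(&blockA); ok {
		fmt.Println(value) // "data for A"
	}

	// A third entry pushes the cache over capacity, so one arbitrary entry
	// is evicted (whichever key map iteration happens to yield first).
	cache.Add(&blockC, "data for C")
	fmt.Println(cache.Has(&blockA), cache.Has(&blockB), cache.Has(&blockC))

	cache.Remove(&blockC)
	fmt.Println(cache.Has(&blockC)) // false
}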
go.sum (2 changed lines)
@@ -39,8 +39,6 @@ github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=